23 Commits

Author SHA1 Message Date
Lucas Asvio
860af3e3e6 kernel: ipq806x: refresh qca-nss patches to kernel 5.15.150 2024-06-02 08:28:57 +02:00
Andrew Robbins
3aeed60ac2 refresh patches for 5.15.127 2024-06-02 08:22:14 +02:00
Lucas Asvio
2a0040bc2f add KONG sqm scripts 2024-06-02 08:21:13 +02:00
Lucas Asvio
ca27452218 fix feeds 2024-06-02 08:18:52 +02:00
ACwifidude
1851893fcc fix qca-mcs-support patch 2024-05-30 09:07:19 +02:00
ACwifidude
5fd687b09f fix socinfo patch 2024-05-30 09:07:19 +02:00
ACwifidude
20c102c07f Revert "kernel: remove obsolete netfilter tcp window size check bypass patch"
This reverts commit 75e78bcaab.
2024-05-30 09:07:19 +02:00
ACwifidude
809318c93b Add onhub NSS support 2024-05-30 09:07:19 +02:00
ACwifidude
80c37c5970 fix mac80211 NSS patch 2024-05-30 09:07:19 +02:00
Takashi ISHIKAWA
863f3f806f ipq806x: fix l2tpv2 and tunipip6 NSS offloading, add WG2600HP support
ipq806x: add NEC WG2600HP NSS offloading support
ipq806x: fix qca-nss-l2tpv2 compile
ipq806x: fix qca-nss-tunipip6 lock
ipq806x: fix kernel panic at /sys/kernel/debug/qcom_socinfo/*/*

Signed-off-by: Takashi ISHIKAWA <tishi-github@tthy.org>
2024-05-30 09:07:19 +02:00
ACwifidude
22d49eaba8 dmac clean range patch 2024-05-30 09:07:19 +02:00
Qosmio
5e1e173b15 ipq806x: Fix incorrect pointer type .parent_names to parent_data
drivers/clk/qcom/gcc-ipq806x.c:3195:33: error: initialization of 'const char * const*' from incompatible pointer type 'const struct clk_parent_data *' [-Werror=incompatible-pointer-types]
 3195 |                 .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
      |                                 ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2024-05-30 09:07:19 +02:00
Qosmio
860434ceab ipq806x: Fix Reference to non-existent node or label "smb208_s1b"
ERROR: Input tree has errors, aborting (use -f to force output)

arch/arm/boot/dts/qcom-ipq8064.dtsi:1686.14-1698.5: ERROR (phandle_references): /soc/amba/nss-common: Reference to non-existent node or label "smb208_s1b"
2024-05-30 09:07:18 +02:00
ACwifidude
166f3a0980 ipq806x: NSS Hardware Offloading dts patch 2024-05-30 09:07:18 +02:00
Lucas Asvio
d1b746c41b Add NSS Package Feed 2024-05-30 09:07:13 +02:00
ACwifidude
db19a189a9 ipq806x: Optimize CPU up threshold 2024-05-30 09:06:19 +02:00
ACwifidude
177a873d5e ipq806x: NSS Hardware Offloading mac80211 support 2024-05-30 09:06:19 +02:00
ACwifidude
16cedb7b05 ipq806x: NSS Hardware Offloading iproute2 patches 2024-05-30 09:06:19 +02:00
ACwifidude
e7304ec775 ipq806x: NSS Hardware Offloading additional patches 2024-05-30 09:06:19 +02:00
ACwifidude
c02323727e ipq806x: NSS Hardware Offloading qdisc patches 2024-05-30 09:06:19 +02:00
ACwifidude
fb83fad6c6 ipq806x: NSS Hardware Offloading Target Core and ECM patches 2024-05-30 09:06:19 +02:00
ACwifidude
2d3f7b5b26 ipq806x: NSS Hardware Offloading Target Files 2024-05-30 09:06:19 +02:00
ACwifidude
834a5e3a9c ipq806x: NSS Hardware Offloading Config5-15 2024-05-30 09:06:19 +02:00
43 changed files with 9687 additions and 8 deletions

View File

@@ -2,3 +2,5 @@ src-git packages https://git.openwrt.org/feed/packages.git^063b2393cbc3e5aab9d2b
src-git luci https://git.openwrt.org/project/luci.git^b07cf9dcfc37e021e5619a41c847e63afbd5d34a
src-git routing https://git.openwrt.org/feed/routing.git^648753932d5a7deff7f2bdb33c000018a709ad84
src-git telephony https://git.openwrt.org/feed/telephony.git^86af194d03592121f5321474ec9918dd109d3057
src-git nss https://github.com/ACwifidude/nss-packages.git;NSS-11.2-K5.15
src-git sqm_scripts_nss https://github.com/rickkdotnet/sqm-scripts-nss.git

View File

@@ -78,6 +78,7 @@ config-$(CONFIG_PACKAGE_CFG80211_TESTMODE) += NL80211_TESTMODE
config-$(call config_package,mac80211) += MAC80211
config-$(CONFIG_PACKAGE_MAC80211_MESH) += MAC80211_MESH
config-$(CONFIG_PACKAGE_MAC80211_NSS_SUPPORT) += MAC80211_NSS_SUPPORT
include ath.mk
include broadcom.mk
@@ -121,7 +122,7 @@ define KernelPackage/mac80211
$(call KernelPackage/mac80211/Default)
TITLE:=Linux 802.11 Wireless Networking Stack
# +kmod-crypto-cmac is a runtime only dependency of net/mac80211/aes_cmac.c
DEPENDS+= +kmod-cfg80211 +kmod-crypto-cmac +kmod-crypto-ccm +kmod-crypto-gcm +hostapd-common
DEPENDS+= +kmod-cfg80211 +kmod-crypto-cmac +kmod-crypto-ccm +kmod-crypto-gcm +hostapd-common +PACKAGE_kmod-qca-nss-drv:kmod-qca-nss-drv
KCONFIG:=\
CONFIG_AVERAGE=y
FILES:= $(PKG_BUILD_DIR)/net/mac80211/mac80211.ko
@@ -132,6 +133,16 @@ endef
define KernelPackage/mac80211/config
if PACKAGE_kmod-mac80211
if PACKAGE_kmod-qca-nss-drv
config PACKAGE_MAC80211_NSS_SUPPORT
bool "Enable NSS support for IPQ platform"
default y
help
This option enables support for NSS in boards
like Netgear R7800.
endif
config PACKAGE_MAC80211_DEBUGFS
bool "Export mac80211 internals in DebugFS"
select KERNEL_DEBUG_FS
@@ -273,9 +284,12 @@ ifeq ($(BUILD_VARIANT),smallbuffers)
C_DEFINES+= -DCONFIG_ATH10K_SMALLBUFFERS
endif
MAKE_OPTS:= \
$(subst -C $(LINUX_DIR),-C "$(PKG_BUILD_DIR)",$(KERNEL_MAKEOPTS)) \
EXTRA_CFLAGS="-I$(PKG_BUILD_DIR)/include $(IREMAP_CFLAGS) $(C_DEFINES)" \
C_DEFINES+= -DSTANDALONE_CT
MAKE_OPTS:= -C "$(PKG_BUILD_DIR)" \
$(KERNEL_MAKE_FLAGS) \
EXTRA_CFLAGS="-I$(PKG_BUILD_DIR)/include $(IREMAP_CFLAGS) $(C_DEFINES) \
-I$(STAGING_DIR)/usr/include/qca-nss-drv" \
KLIB_BUILD="$(LINUX_DIR)" \
MODPROBE=true \
KLIB=$(TARGET_MODULES_DIR) \

View File

@@ -0,0 +1,344 @@
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -19,6 +19,13 @@ comment "CFG80211 needs to be enabled fo
if MAC80211 != n
+config MAC80211_NSS_SUPPORT
+ bool "Enable NSS support for IPQ platform"
+ default n
+ ---help---
+ This option enables support for NSS in boards
+ like AP148.
+
config MAC80211_HAS_RC
bool
--- a/local-symbols
+++ b/local-symbols
@@ -39,6 +39,7 @@ LIB80211_CRYPT_CCMP=
LIB80211_CRYPT_TKIP=
LIB80211_DEBUG=
MAC80211=
+MAC80211_NSS_SUPPORT=
MAC80211_HAS_RC=
MAC80211_RC_MINSTREL=
MAC80211_RC_DEFAULT_MINSTREL=
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -36,6 +36,10 @@
extern const struct cfg80211_ops mac80211_config_ops;
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+#include <nss_api_if.h>
+#endif
+
struct ieee80211_local;
/* Maximum number of broadcast/multicast frames to buffer when some of the
@@ -1097,6 +1101,12 @@ struct ieee80211_sub_if_data {
} debugfs;
#endif
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+ struct nss_virt_if_handle *nssctx;
+ struct sk_buff_head rx_queue;
+ struct work_struct rx_work;
+#endif
+
/* must be last, dynamically sized area in this! */
struct ieee80211_vif vif;
};
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -15,6 +15,7 @@
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
+#include <linux/module.h>
#include <linux/kcov.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
@@ -27,6 +28,12 @@
#include "wme.h"
#include "rate.h"
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+bool nss_redirect = true;
+module_param(nss_redirect, bool, 0644);
+MODULE_PARM_DESC(nss_redirect, "module param to enable NSS Redirect; 1-enable, 0-disable");
+#endif
+
/**
* DOC: Interface list locking
*
@@ -439,6 +446,64 @@ static int ieee80211_open(struct net_dev
return err;
}
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+/* This callback is registered for nss redirect to receive packet exceptioned from nss in Rx path.
+ * When packet does not match any of the ecm rules is redirected back here.
+ */
+void receive_from_nss(struct net_device *dev, struct sk_buff *sk_buff, struct napi_struct *napi)
+{
+ struct net_device *netdev;
+ struct sk_buff *skb;
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!dev) {
+ kfree(sk_buff);
+ return;
+ }
+
+ netdev = (struct net_device *)dev;
+ sdata = netdev_priv(netdev);
+ if (sdata->dev != dev) {
+ kfree(sk_buff);
+ return;
+ }
+ skb = (struct sk_buff *)sk_buff;
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ napi_gro_receive(napi, skb);
+}
+
+static int ieee80211_create_nss_virtif(struct ieee80211_sub_if_data *sdata, struct net_device *dev)
+{
+ if (sdata->nssctx != NULL) {
+ sdata_err(sdata, "Cannot create a NSS virtual interface. Already exists[n2h:%d, h2n:%d]!\n",
+ sdata->nssctx->if_num_n2h, sdata->nssctx->if_num_h2n);
+ return 1;
+ }
+
+ sdata->nssctx = NULL;
+ if (nss_redirect) {
+ sdata->nssctx = nss_virt_if_create_sync(dev);
+ if (sdata->nssctx) {
+ sdata_info(sdata, "Created a NSS virtual interface\n");
+ nss_virt_if_register(sdata->nssctx, receive_from_nss, sdata->dev);
+ }
+ else
+ sdata_err(sdata, "Failed to create a NSS virtual interface\n");
+ }
+
+ return 0;
+}
+
+static void ieee80211_destroy_nss_virtif(struct ieee80211_sub_if_data *sdata)
+{
+ if (sdata->nssctx) {
+ nss_virt_if_destroy_sync(sdata->nssctx);
+ sdata_info(sdata, "Destroyed NSS virtual interface\n");
+ }
+}
+#endif
+
static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down)
{
struct ieee80211_local *local = sdata->local;
@@ -800,8 +865,24 @@ static void ieee80211_teardown_sdata(str
ieee80211_link_stop(&sdata->deflink);
}
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+static int ieee80211_init(struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ ieee80211_create_nss_virtif(sdata, dev);
+
+ return 0;
+}
+#endif
+
static void ieee80211_uninit(struct net_device *dev)
{
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ ieee80211_destroy_nss_virtif(sdata);
+#endif
ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
}
@@ -814,6 +895,9 @@ ieee80211_get_stats64(struct net_device
static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+ .ndo_init = ieee80211_init,
+#endif
.ndo_uninit = ieee80211_uninit,
.ndo_start_xmit = ieee80211_subif_start_xmit,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -33,6 +33,60 @@
#include "wme.h"
#include "rate.h"
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+extern bool nss_redirect;
+
+#define case_rtn_string(val) case val: return #val
+
+static const char *nss_tx_status_str(nss_tx_status_t status)
+{
+ switch (status) {
+ case_rtn_string(NSS_TX_SUCCESS);
+ case_rtn_string(NSS_TX_FAILURE);
+ case_rtn_string(NSS_TX_FAILURE_QUEUE);
+ case_rtn_string(NSS_TX_FAILURE_NOT_READY);
+ case_rtn_string(NSS_TX_FAILURE_TOO_LARGE);
+ case_rtn_string(NSS_TX_FAILURE_TOO_SHORT);
+ case_rtn_string(NSS_TX_FAILURE_NOT_SUPPORTED);
+ case_rtn_string(NSS_TX_FAILURE_BAD_PARAM);
+ case_rtn_string(NSS_TX_FAILURE_NOT_ENABLED);
+ case_rtn_string(NSS_TX_FAILURE_SYNC_BAD_PARAM);
+ case_rtn_string(NSS_TX_FAILURE_SYNC_TIMEOUT);
+ case_rtn_string(NSS_TX_FAILURE_SYNC_FW_ERR);
+ default:
+ return "Unknown NSS TX status";
+ }
+}
+
+static void netif_rx_nss(struct ieee80211_rx_data *rx,
+ struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ int ret;
+
+ if (!sdata->nssctx)
+ goto out;
+
+ /* NSS expects ethernet header in skb data so resetting here */
+ skb_push(skb, ETH_HLEN);
+ ret = nss_virt_if_tx_buf(sdata->nssctx, skb);
+ if (ret) {
+ if (net_ratelimit()) {
+ sdata_err(sdata, "NSS TX failed with error: %s\n",
+ nss_tx_status_str(ret));
+ }
+ goto out;
+ }
+
+ return;
+out:
+ if (rx->list)
+ list_add_tail(&skb->list, rx->list);
+ else
+ netif_receive_skb(skb);
+}
+#endif
+
/*
* monitor mode reception
*
@@ -2635,10 +2689,16 @@ static void ieee80211_deliver_skb_to_loc
ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
/* deliver to local stack */
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+ if (likely(nss_redirect)) {
+ netif_rx_nss(rx, skb);
+ }
+#else
if (rx->list)
list_add_tail(&skb->list, rx->list);
else
netif_receive_skb(skb);
+#endif
}
}
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -36,6 +36,11 @@
#include "wme.h"
#include "rate.h"
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+#include <net/ip.h>
+#include <net/dsfield.h>
+#endif
+
/* misc utils */
static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
@@ -1729,6 +1734,16 @@ static bool ieee80211_tx_frags(struct ie
return true;
}
} else {
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+ if (skb_queue_len(&local->pending[q]) >= 1000) {
+ spin_unlock_irqrestore(
+ &local->queue_stop_reason_lock,
+ flags);
+ ieee80211_purge_tx_queue(&local->hw,
+ skbs);
+ return false;
+ }
+#endif
/*
* Since queue is stopped, queue up frames for
@@ -4448,6 +4463,35 @@ static void ieee80211_mlo_multicast_tx(s
kfree_skb(skb);
}
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+void ieee80211_xmit_nss_fixup(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ /* Packets from NSS does not have valid protocol, priority and other
+ * network stack values. Derive required parameters (priority
+ * and network_header) from payload for QoS header.
+ * XXX: Here the assumption is that packet are in 802.3 format.
+ * As of now priority is handled only for IPv4 and IPv6.
+ */
+
+ if (sdata->nssctx && likely(!skb->protocol)) {
+ skb_set_network_header(skb, 14);
+ switch (((struct ethhdr *)skb->data)->h_proto) {
+ case htons(ETH_P_IP):
+ skb->priority = (ipv4_get_dsfield(ip_hdr(skb)) &
+ 0xfc) >> 5;
+ break;
+ case htons(ETH_P_IPV6):
+ skb->priority = (ipv6_get_dsfield(ipv6_hdr(skb)) &
+ 0xfc) >> 5;
+ break;
+ }
+ }
+}
+#endif
+
/**
* ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
* @skb: packet to be sent
@@ -4461,6 +4505,10 @@ netdev_tx_t ieee80211_subif_start_xmit(s
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
const struct ethhdr *eth = (void *)skb->data;
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+ ieee80211_xmit_nss_fixup(skb, dev);
+#endif
+
if (likely(!is_multicast_ether_addr(eth->h_dest)))
goto normal;
@@ -4646,6 +4694,10 @@ netdev_tx_t ieee80211_subif_start_xmit_8
struct ieee80211_key *key;
struct sta_info *sta;
+#ifdef CPTCFG_MAC80211_NSS_SUPPORT
+ ieee80211_xmit_nss_fixup(skb, dev);
+#endif
+
if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
kfree_skb(skb);
return NETDEV_TX_OK;

View File

@@ -0,0 +1,11 @@
--- a/Makefile
+++ b/Makefile
@@ -65,7 +65,7 @@ WFLAGS += -Wmissing-declarations -Wold-s
CFLAGS := $(WFLAGS) $(CCOPTS) -I../include -I../include/uapi $(DEFINES) $(CFLAGS)
YACCFLAGS = -d -t -v
-SUBDIRS=lib ip tc bridge misc netem genl man
+SUBDIRS=lib ip tc bridge misc genl devlink rdma
ifeq ($(HAVE_MNL),y)
SUBDIRS += tipc devlink rdma dcb vdpa
endif

View File

@@ -4,11 +4,11 @@
CFLAGS := $(WFLAGS) $(CCOPTS) -I../include -I../include/uapi $(DEFINES) $(CFLAGS)
YACCFLAGS = -d -t -v
-SUBDIRS=lib ip tc bridge misc netem genl man
-SUBDIRS=lib ip tc bridge misc genl devlink rdma
+SUBDIRS=lib ip tc bridge misc genl
ifeq ($(HAVE_MNL),y)
-SUBDIRS += tipc devlink rdma dcb vdpa
+SUBDIRS += devlink rdma
+SUBDIRS += tipc devlink rdma
endif
LIBNETLINK=../lib/libutil.a ../lib/libnetlink.a

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,243 @@
--- a/tc/Makefile
+++ b/tc/Makefile
@@ -55,6 +55,7 @@ TCMODULES += m_tunnel_key.o
TCMODULES += m_sample.o
TCMODULES += m_ct.o
TCMODULES += m_gate.o
+TCMODULES += m_nssmirred.o
TCMODULES += p_ip.o
TCMODULES += p_ip6.o
TCMODULES += p_icmp.o
--- /dev/null
+++ b/tc/m_nssmirred.c
@@ -0,0 +1,183 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <syslog.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <string.h>
+#include "utils.h"
+#include "tc_util.h"
+#include "tc_common.h"
+#include <linux/tc_act/tc_nssmirred.h>
+
+/*
+ * explain()
+ * API to print the explaination of nssmirred action statement's
+ * elements.
+ */
+static void explain(void)
+{
+ fprintf(stderr, "Usage: nssmirred redirect <dev TO_DEVICENAME fromdev FROM_DEVICENAME> \n");
+ fprintf(stderr, "where: \n");
+ fprintf(stderr, "\tTO_DEVICENAME is the devicename to redirect to\n");
+ fprintf(stderr, "\tFROM_DEVICENAME is the devicename to redirect from\n");
+}
+
+/*
+ * usage()
+ * API to show the usage of the nssmirred action.
+ */
+static void usage(void)
+{
+ explain();
+ exit(-1);
+}
+
+/*
+ * parse_nss_mirred()
+ * Parse and validate the nssmirred action statement.
+ */
+static int parse_nss_mirred(struct action_util *a, int *argc_p, char ***argv_p,
+ int tca_id, struct nlmsghdr *n)
+{
+ int idx, argc = *argc_p;
+ char **argv = *argv_p;
+ struct tc_nss_mirred p;
+ struct rtattr *tail;
+
+ if (argc < 0) {
+ fprintf(stderr, "nssmirred bad argument count %d. Try option \"help\"\n", argc);
+ goto error;
+ }
+
+ if (matches(*argv, "nssmirred")) {
+ fprintf(stderr, "nssmirred bad argument %s. Try option \"help\"\n", *argv);
+ goto error;
+ }
+
+ NEXT_ARG();
+ if (!matches(*argv, "help")) {
+ usage();
+ }
+
+ if (matches(*argv, "redirect")) {
+ fprintf(stderr, "nssmirred bad argument %s. Try option \"help\"\n", *argv);
+ goto error;
+ }
+
+ NEXT_ARG();
+ if (matches(*argv, "dev")) {
+ fprintf(stderr, "nssmirred: bad value %s. Try option \"help\"\n", *argv);
+ goto error;
+ }
+
+ NEXT_ARG();
+ memset(&p, 0, sizeof(struct tc_nss_mirred));
+ if ((idx = ll_name_to_index(*argv)) == 0) {
+ fprintf(stderr, "Cannot find to device \"%s\"\n", *argv);
+ goto error;
+ }
+
+ p.to_ifindex = idx;
+ NEXT_ARG();
+ if (matches(*argv, "fromdev")) {
+ fprintf(stderr, "nssmirred: bad value %s. Try option \"help\"\n", *argv);
+ goto error;
+ }
+
+ NEXT_ARG();
+ if ((idx = ll_name_to_index(*argv)) == 0) {
+ fprintf(stderr, "Cannot find from device \"%s\"\n", *argv);
+ goto error;
+ }
+
+ p.from_ifindex = idx;
+ p.action = TC_ACT_STOLEN;
+ tail = NLMSG_TAIL(n);
+ addattr_l(n, MAX_MSG, tca_id, NULL, 0);
+ addattr_l(n, MAX_MSG, TCA_NSS_MIRRED_PARMS, &p, sizeof (p));
+ tail->rta_len = (void *) NLMSG_TAIL(n) - (void *) tail;
+ argc--;
+ argv++;
+ *argc_p = argc;
+ *argv_p = argv;
+ return 0;
+
+error:
+ return -1;
+}
+
+/*
+ * print_nss_mirred()
+ * Print information related to nssmirred action.
+ */
+static int print_nss_mirred(struct action_util *au, FILE * f, struct rtattr *arg)
+{
+ struct tc_nss_mirred *p;
+ struct rtattr *tb[TCA_NSS_MIRRED_MAX + 1];
+ const char *from_dev, *to_dev;
+
+ if (arg == NULL) {
+ return -1;
+ }
+
+ parse_rtattr_nested(tb, TCA_NSS_MIRRED_MAX, arg);
+
+ if (tb[TCA_NSS_MIRRED_PARMS] == NULL) {
+ fprintf(f, "[NULL nssmirred parameters]");
+ goto error;
+ }
+
+ p = RTA_DATA(tb[TCA_NSS_MIRRED_PARMS]);
+ if ((from_dev = ll_index_to_name(p->from_ifindex)) == 0) {
+ fprintf(stderr, "Invalid interface (index: %d)\n", p->from_ifindex);
+ goto error;
+ }
+
+ if ((to_dev = ll_index_to_name(p->to_ifindex)) == 0) {
+ fprintf(stderr, "Invalid interface (index: %d)\n", p->to_ifindex);
+ goto error;
+ }
+
+ fprintf(f, "nssmirred (%s to device %s) stolen\n", from_dev, to_dev);
+ fprintf(f, "\tindex %d ref %d bind %d\n",p->index,p->refcnt,p->bindcnt);
+
+ if (show_stats) {
+ if (tb[TCA_NSS_MIRRED_TM]) {
+ struct tcf_t *tm = RTA_DATA(tb[TCA_NSS_MIRRED_TM]);
+ print_tm(f,tm);
+ }
+ }
+ return 0;
+
+error:
+ return -1;
+}
+
+/*
+ * nssmirred_action_util
+ * nssmirred action utility structure.
+ */
+struct action_util nssmirred_action_util = {
+ .id = "nssmirred",
+ .parse_aopt = parse_nss_mirred,
+ .print_aopt = print_nss_mirred,
+};
--- /dev/null
+++ b/include/linux/tc_act/tc_nssmirred.h
@@ -0,0 +1,44 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#ifndef __LINUX_TC_NSS_MIR_H
+#define __LINUX_TC_NSS_MIR_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+
+/*
+ * tc_nss_mirred
+ * Structure for nssmirred action.
+ */
+struct tc_nss_mirred {
+ tc_gen;
+ __u32 from_ifindex; /* ifindex of the port to be redirected from */
+ __u32 to_ifindex; /* ifindex of the port to be redirected to */
+};
+
+/*
+ * Types of nssmirred action parameters.
+ */
+enum {
+ TCA_NSS_MIRRED_UNSPEC,
+ TCA_NSS_MIRRED_TM,
+ TCA_NSS_MIRRED_PARMS,
+ __TCA_NSS_MIRRED_MAX
+};
+#define TCA_NSS_MIRRED_MAX (__TCA_NSS_MIRRED_MAX - 1)
+
+#endif /* __LINUX_TC_NSS_MIR_H */

View File

@@ -0,0 +1,73 @@
From: Felix Fietkau <nbd@nbd.name>
Subject: netfilter: optional tcp window check
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
net/netfilter/nf_conntrack_proto_tcp.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -31,6 +31,9 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+/* Do not check the TCP window for incoming packets */
+static int nf_ct_tcp_no_window_check __read_mostly = 1;
+
/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
@@ -476,6 +479,9 @@ static bool tcp_in_window(const struct n
s32 receiver_offset;
bool res, in_recv_win;
+ if (nf_ct_tcp_no_window_check)
+ return true;
+
/*
* Get the required data from the packet.
*/
@@ -1139,7 +1145,7 @@ int nf_conntrack_tcp_packet(struct nf_co
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
- else if (ct->proto.tcp.last_win == 0 &&
+ else if (!nf_ct_tcp_no_window_check && ct->proto.tcp.last_win == 0 &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -25,6 +25,9 @@
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <linux/rculist_nulls.h>
+/* Do not check the TCP window for incoming packets */
+static int nf_ct_tcp_no_window_check __read_mostly = 1;
+
static bool enable_hooks __read_mostly;
MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks");
module_param(enable_hooks, bool, 0000);
@@ -657,6 +660,7 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
#endif
+ NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK,
__NF_SYSCTL_CT_LAST_SYSCTL,
};
@@ -993,6 +997,13 @@ static struct ctl_table nf_ct_sysctl_tab
.proc_handler = proc_dointvec_jiffies,
},
#endif
+ [NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK] = {
+ .procname = "nf_conntrack_tcp_no_window_check",
+ .data = &nf_ct_tcp_no_window_check,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{}
};

View File

@@ -0,0 +1,83 @@
From: Felix Fietkau <nbd@nbd.name>
Subject: netfilter: optional tcp window check
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Christian 'Ansuel' Marangi <ansuelsmth@gmail.com>
---
net/netfilter/nf_conntrack_proto_tcp.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -465,6 +465,9 @@ static bool tcp_in_window(struct nf_conn
s32 receiver_offset;
bool res, in_recv_win;
+ if (tn->tcp_no_window_check)
+ return true;
+
/*
* Get the required data from the packet.
*/
@@ -1191,7 +1194,7 @@ int nf_conntrack_tcp_packet(struct nf_co
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
- else if (ct->proto.tcp.last_win == 0 &&
+ else if (!tn->tcp_no_window_check && ct->proto.tcp.last_win == 0 &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else
@@ -1507,6 +1510,9 @@ void nf_conntrack_tcp_init_net(struct ne
*/
tn->tcp_be_liberal = 0;
+ /* Skip Windows Check */
+ tn->tcp_no_window_check = 0;
+
/* If it's non-zero, we turn off RST sequence number check */
tn->tcp_ignore_invalid_rst = 0;
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -633,6 +633,7 @@ enum nf_ct_sysctl_index {
#endif
NF_SYSCTL_CT_PROTO_TCP_LOOSE,
NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+ NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK,
NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST,
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
@@ -848,6 +849,14 @@ static struct ctl_table nf_ct_sysctl_tab
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
+ [NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK] = {
+ .procname = "nf_conntrack_tcp_no_window_check",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
[NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = {
.procname = "nf_conntrack_tcp_ignore_invalid_rst",
.maxlen = sizeof(u8),
@@ -1058,6 +1067,7 @@ static void nf_conntrack_standalone_init
XASSIGN(LOOSE, &tn->tcp_loose);
XASSIGN(LIBERAL, &tn->tcp_be_liberal);
+ XASSIGN(NO_WINDOW_CHECK, &tn->tcp_no_window_check);
XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst);
#undef XASSIGN
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -26,6 +26,7 @@ struct nf_tcp_net {
unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
u8 tcp_loose;
u8 tcp_be_liberal;
+ u8 tcp_no_window_check;
u8 tcp_max_retrans;
u8 tcp_ignore_invalid_rst;
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)

View File

@@ -11,7 +11,9 @@ boot() {
# Effective only with ondemand
echo 600000 > /sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq
echo 600000 > /sys/devices/system/cpu/cpufreq/policy1/scaling_min_freq
echo 10 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
echo 50 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
echo 60 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
# 10 Seconds => 60 Seconds for sampling rate for frequency down scaling decisions
echo 25 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
# 50% => 25% CPU load threshold for up-scale to max CPU frequency
fi
}

View File

@@ -1,3 +1,4 @@
CONFIG_REGULATOR_NSS_VOLT=y
CONFIG_ALIGNMENT_TRAP=y
# CONFIG_APQ_GCC_8084 is not set
# CONFIG_APQ_MMCC_8084 is not set

View File

@@ -0,0 +1,24 @@
/*
* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __QCOM_NSS_VOL_SCALING_H
#define __QCOM_NSS_VOL_SCALING_H
#include <linux/regulator/consumer.h>
int nss_ramp_voltage(unsigned long rate, bool ramp_up);
#endif

View File

@@ -0,0 +1,112 @@
/*
**************************************************************************
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
/* DSCP remark conntrack extension APIs. */
#ifndef _NF_CONNTRACK_DSCPREMARK_H
#define _NF_CONNTRACK_DSCPREMARK_H
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
/* Rule flags */
#define NF_CT_DSCPREMARK_EXT_DSCP_RULE_VALID 0x1
/* Rule validity */
#define NF_CT_DSCPREMARK_EXT_RULE_VALID 0x1
#define NF_CT_DSCPREMARK_EXT_RULE_NOT_VALID 0x0
/* Which QoS features are set flags */
#define NF_CT_DSCPREMARK_EXT_PRIO 0x1
#define NF_CT_DSCPREMARK_EXT_DSCP 0x2
#define NF_CT_DSCPREMARK_EXT_IGS_QOS 0x4
#define NF_CT_DSCPREMARK_EXT_MARK 0x8
/*
* DSCP remark conntrack extension structure.
*/
struct nf_ct_dscpremark_ext {
__u32 flow_priority; /* Original direction packet priority */
__u32 reply_priority; /* Reply direction packet priority */
__u32 flow_mark; /* Original direction packet mark */
__u32 reply_mark; /* Reply direction packet mark */
__u16 igs_flow_qos_tag; /* Original direction ingress packet priority */
__u16 igs_reply_qos_tag; /* Reply direction ingress packet priority */
__u8 flow_dscp; /* IP DSCP value for original direction */
__u8 reply_dscp; /* IP DSCP value for reply direction */
__u16 rule_flags; /* Rule Validity flags */
__u16 flow_set_flags; /* Original direction set flags */
__u16 return_set_flags; /* Reply direction set flags */
};
/*
* nf_ct_dscpremark_ext_find()
* Finds the extension data of the conntrack entry if it exists.
*/
static inline struct nf_ct_dscpremark_ext *
nf_ct_dscpremark_ext_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
return nf_ct_ext_find(ct, NF_CT_EXT_DSCPREMARK);
#else
return NULL;
#endif
}
/*
* nf_ct_dscpremark_ext_add()
* Adds the extension data to the conntrack entry.
*/
static inline
struct nf_ct_dscpremark_ext *nf_ct_dscpremark_ext_add(struct nf_conn *ct,
gfp_t gfp)
{
#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
struct nf_ct_dscpremark_ext *ncde;
ncde = nf_ct_ext_add(ct, NF_CT_EXT_DSCPREMARK, gfp);
if (!ncde)
return NULL;
return ncde;
#else
return NULL;
#endif
};
#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
extern int nf_conntrack_dscpremark_ext_init(void);
extern void nf_conntrack_dscpremark_ext_fini(void);
extern int nf_conntrack_dscpremark_ext_set_dscp_rule_valid(struct nf_conn *ct);
extern int
nf_conntrack_dscpremark_ext_get_dscp_rule_validity(struct nf_conn *ct);
#else
/*
 * nf_conntrack_dscpremark_ext_init()
 *	No-op stub used when CONFIG_NF_CONNTRACK_DSCPREMARK_EXT is disabled;
 *	always reports success so callers need no config-specific handling.
 */
static inline int nf_conntrack_dscpremark_ext_init(void)
{
return 0;
}
/*
 * nf_conntrack_dscpremark_ext_fini()
 *	No-op stub counterpart of the init stub above.
 */
static inline void nf_conntrack_dscpremark_ext_fini(void)
{
}
#endif /* CONFIG_NF_CONNTRACK_DSCPREMARK_EXT */
#endif /* _NF_CONNTRACK_DSCPREMARK_H */

View File

@@ -0,0 +1,435 @@
/* Copyright (c) 2009-2014, 2016, 2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
#include <linux/of.h>
#define CPU_IPQ4018 272
#define CPU_IPQ4019 273
#define CPU_IPQ4028 287
#define CPU_IPQ4029 288
#define CPU_IPQ8062 201
#define CPU_IPQ8064 202
#define CPU_IPQ8066 203
#define CPU_IPQ8065 280
#define CPU_IPQ8068 204
#define CPU_IPQ8069 281
#define CPU_IPQ8074 323
#define CPU_IPQ8072 342
#define CPU_IPQ8076 343
#define CPU_IPQ8078 344
#define CPU_IPQ8070 375
#define CPU_IPQ8071 376
#define CPU_IPQ8072A 389
#define CPU_IPQ8074A 390
#define CPU_IPQ8076A 391
#define CPU_IPQ8078A 392
#define CPU_IPQ8070A 395
#define CPU_IPQ8071A 396
#define CPU_IPQ8172 397
#define CPU_IPQ8173 398
#define CPU_IPQ8174 399
#define CPU_IPQ6018 402
#define CPU_IPQ6028 403
#define CPU_IPQ6000 421
#define CPU_IPQ6010 422
#define CPU_IPQ6005 453
/* TBD the CHIP IDs */
#define CPU_IPQ5000 425
#define CPU_IPQ5010 426
#define CPU_IPQ5018 427
/*
 * read_ipq_soc_version_major()
 *	Returns a pointer to the "soc_version_major" property of the device
 *	tree root node, or NULL if the property is absent.
 *
 * NOTE(review): the raw property bytes are returned without be32_to_cpu()
 * conversion; presumably the boot firmware stores the value in CPU-native
 * endianness — confirm against the boot chain before relying on *prop.
 * NOTE(review): of_find_node_by_path() takes a node reference that is never
 * dropped here; harmless for the always-present root node, but worth noting.
 */
static inline const int* read_ipq_soc_version_major(void)
{
const int *prop;
prop = of_get_property(of_find_node_by_path("/"), "soc_version_major",
NULL);
return prop;
}
/*
 * read_ipq_cpu_type()
 *	Reads the SoC id from the "cpu_type" property of the device tree
 *	root node; falls back to CPU_IPQ8064 when the property is missing.
 *
 * NOTE(review): as with read_ipq_soc_version_major(), the property value is
 * dereferenced without be32_to_cpu() conversion — assumed to be stored in
 * CPU-native endianness by the boot firmware; confirm.
 */
static inline int read_ipq_cpu_type(void)
{
const int *prop;
prop = of_get_property(of_find_node_by_path("/"), "cpu_type", NULL);
/*
* Return Default CPU type if "cpu_type" property is not found in DTSI
*/
if (!prop)
return CPU_IPQ8064;
return *prop;
}
/*
 * cpu_is_ipqXXXX() SoC predicates.
 *
 * Each predicate compares the SoC id read from the device tree (see
 * read_ipq_cpu_type() above) against the matching CPU_IPQXXXX constant.
 * On non-QCOM builds every predicate compiles to a constant 0.
 *
 * The ~40 hand-written, structurally identical helpers are generated from
 * a single macro; every function name and its behavior are unchanged.
 *
 * IPQ_CPU_IS(name, id) defines:
 *	static inline int cpu_is_ipq<name>(void)
 * returning (read_ipq_cpu_type() == CPU_IPQ<id>).  @name and @id differ
 * only for the "A" variants (lower-case function suffix, upper-case id).
 */
#ifdef CONFIG_ARCH_QCOM
#define IPQ_CPU_IS(name, id) \
static inline int cpu_is_ipq##name(void) \
{ \
	return read_ipq_cpu_type() == CPU_IPQ##id; \
}
#else
#define IPQ_CPU_IS(name, id) \
static inline int cpu_is_ipq##name(void) \
{ \
	return 0; \
}
#endif

IPQ_CPU_IS(4018, 4018)
IPQ_CPU_IS(4019, 4019)
IPQ_CPU_IS(4028, 4028)
IPQ_CPU_IS(4029, 4029)
IPQ_CPU_IS(8062, 8062)
IPQ_CPU_IS(8064, 8064)
IPQ_CPU_IS(8066, 8066)
IPQ_CPU_IS(8068, 8068)
IPQ_CPU_IS(8065, 8065)
IPQ_CPU_IS(8069, 8069)
IPQ_CPU_IS(8070, 8070)
IPQ_CPU_IS(8071, 8071)
IPQ_CPU_IS(8072, 8072)
IPQ_CPU_IS(8074, 8074)
IPQ_CPU_IS(8076, 8076)
IPQ_CPU_IS(8078, 8078)
IPQ_CPU_IS(8072a, 8072A)
IPQ_CPU_IS(8074a, 8074A)
IPQ_CPU_IS(8076a, 8076A)
IPQ_CPU_IS(8078a, 8078A)
IPQ_CPU_IS(8070a, 8070A)
IPQ_CPU_IS(8071a, 8071A)
IPQ_CPU_IS(8172, 8172)
IPQ_CPU_IS(8173, 8173)
IPQ_CPU_IS(8174, 8174)
IPQ_CPU_IS(6018, 6018)
IPQ_CPU_IS(6028, 6028)
IPQ_CPU_IS(6000, 6000)
IPQ_CPU_IS(6010, 6010)
IPQ_CPU_IS(6005, 6005)
IPQ_CPU_IS(5000, 5000)
IPQ_CPU_IS(5010, 5010)
IPQ_CPU_IS(5018, 5018)

#undef IPQ_CPU_IS

/* cpu_is_ipq40xx() - true on any IPQ40xx-family SoC */
static inline int cpu_is_ipq40xx(void)
{
#ifdef CONFIG_ARCH_QCOM
	return cpu_is_ipq4018() || cpu_is_ipq4019() ||
	       cpu_is_ipq4028() || cpu_is_ipq4029();
#else
	return 0;
#endif
}

/* cpu_is_ipq806x() - true on any IPQ806x-family SoC */
static inline int cpu_is_ipq806x(void)
{
#ifdef CONFIG_ARCH_QCOM
	return cpu_is_ipq8062() || cpu_is_ipq8064() ||
	       cpu_is_ipq8066() || cpu_is_ipq8068() ||
	       cpu_is_ipq8065() || cpu_is_ipq8069();
#else
	return 0;
#endif
}

/* cpu_is_ipq807x() - true on any IPQ807x-family SoC (incl. A and 817x ids) */
static inline int cpu_is_ipq807x(void)
{
#ifdef CONFIG_ARCH_QCOM
	return cpu_is_ipq8072() || cpu_is_ipq8074() ||
	       cpu_is_ipq8076() || cpu_is_ipq8078() ||
	       cpu_is_ipq8070() || cpu_is_ipq8071() ||
	       cpu_is_ipq8072a() || cpu_is_ipq8074a() ||
	       cpu_is_ipq8076a() || cpu_is_ipq8078a() ||
	       cpu_is_ipq8070a() || cpu_is_ipq8071a() ||
	       cpu_is_ipq8172() || cpu_is_ipq8173() ||
	       cpu_is_ipq8174();
#else
	return 0;
#endif
}

/* cpu_is_ipq60xx() - true on any IPQ60xx-family SoC */
static inline int cpu_is_ipq60xx(void)
{
#ifdef CONFIG_ARCH_QCOM
	return cpu_is_ipq6018() || cpu_is_ipq6028() ||
	       cpu_is_ipq6000() || cpu_is_ipq6010() ||
	       cpu_is_ipq6005();
#else
	return 0;
#endif
}

/* cpu_is_ipq50xx() - true on any IPQ50xx-family SoC */
static inline int cpu_is_ipq50xx(void)
{
#ifdef CONFIG_ARCH_QCOM
	return cpu_is_ipq5000() || cpu_is_ipq5010() ||
	       cpu_is_ipq5018();
#else
	return 0;
#endif
}
#endif /* _ARCH_ARM_MACH_MSM_SOCINFO_H_ */

View File

@@ -0,0 +1,36 @@
#ifndef __LINUX_TC_NSS_MIRRED_H
#define __LINUX_TC_NSS_MIRRED_H
#include <linux/pkt_cls.h>
/*
 * Type of nss mirred action.
 */
#define TCA_ACT_MIRRED_NSS 17
/*
 * Types of netlink attributes (parameters) for the nss mirred action.
 */
enum {
TC_NSS_MIRRED_UNSPEC,
TC_NSS_MIRRED_TM,
TC_NSS_MIRRED_PARMS,
__TC_NSS_MIRRED_MAX
};
/* Highest valid attribute id (standard __MAX - 1 netlink convention). */
#define TC_NSS_MIRRED_MAX (__TC_NSS_MIRRED_MAX - 1)
/*
 * struct tc_nss_mirred
 *	tc command structure for the nss mirred action, exchanged with
 *	userspace as the TC_NSS_MIRRED_PARMS attribute payload.
 *
 * tc_gen expands to the common tc action fields (index/capab/action/
 * refcnt/bindcnt) declared in linux/pkt_cls.h, included above.
 */
struct tc_nss_mirred {
tc_gen; /* General tc structure. */
__u32 from_ifindex; /* ifindex of the port from which traffic
* will be redirected.
*/
__u32 to_ifindex; /* ifindex of the port to which traffic
* will be redirected.
*/
};
#endif /* __LINUX_TC_NSS_MIRRED_H */

View File

@@ -0,0 +1,92 @@
/*
**************************************************************************
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
/* DSCP remark handling conntrack extension registration. */
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_dscpremark_ext.h>
/* DSCP remark conntrack extension type declaration.
 * Describes the size/alignment of struct nf_ct_dscpremark_ext so the
 * conntrack core can carve out per-connection storage under the
 * NF_CT_EXT_DSCPREMARK id; registered in nf_conntrack_dscpremark_ext_init().
 */
static struct nf_ct_ext_type dscpremark_extend __read_mostly = {
.len = sizeof(struct nf_ct_dscpremark_ext),
.align = __alignof__(struct nf_ct_dscpremark_ext),
.id = NF_CT_EXT_DSCPREMARK,
};
/* nf_conntrack_dscpremark_ext_init()
 *	Initializes the DSCP remark conntrack extension by registering
 *	the dscpremark_extend extension type with the conntrack core.
 *
 * Returns 0 on success, or the negative error code reported by
 * nf_ct_extend_register() on failure (after logging a warning).
 */
int nf_conntrack_dscpremark_ext_init(void)
{
	int rc = nf_ct_extend_register(&dscpremark_extend);

	if (rc >= 0)
		return 0;

	pr_warn("nf_conntrack_dscpremark: Unable to register extension\n");
	return rc;
}
/* nf_conntrack_dscpremark_ext_set_dscp_rule_valid()
 *	Set DSCP rule validity flag in the extension
 *
 * Returns 0 on success, or -1 when @ct carries no dscpremark extension.
 *
 * NOTE(review): this assigns rule_flags outright rather than OR-ing the
 * DSCP_RULE_VALID bit in, so any other rule flags previously set on the
 * connection are cleared here — confirm that is intentional.
 */
int nf_conntrack_dscpremark_ext_set_dscp_rule_valid(struct nf_conn *ct)
{
struct nf_ct_dscpremark_ext *ncde;
ncde = nf_ct_dscpremark_ext_find(ct);
if (!ncde)
return -1;
ncde->rule_flags = NF_CT_DSCPREMARK_EXT_DSCP_RULE_VALID;
return 0;
}
EXPORT_SYMBOL(nf_conntrack_dscpremark_ext_set_dscp_rule_valid);
/* nf_conntrack_dscpremark_ext_get_dscp_rule_validity()
 *	Check whether the DSCP rule flag is set in the extension of @ct.
 *
 * Returns NF_CT_DSCPREMARK_EXT_RULE_VALID only when the extension exists
 * and carries the DSCP_RULE_VALID bit; NF_CT_DSCPREMARK_EXT_RULE_NOT_VALID
 * in every other case (including a missing extension).
 */
int nf_conntrack_dscpremark_ext_get_dscp_rule_validity(struct nf_conn *ct)
{
	struct nf_ct_dscpremark_ext *ncde = nf_ct_dscpremark_ext_find(ct);

	if (ncde && (ncde->rule_flags & NF_CT_DSCPREMARK_EXT_DSCP_RULE_VALID))
		return NF_CT_DSCPREMARK_EXT_RULE_VALID;

	return NF_CT_DSCPREMARK_EXT_RULE_NOT_VALID;
}
EXPORT_SYMBOL(nf_conntrack_dscpremark_ext_get_dscp_rule_validity);
/* nf_conntrack_dscpremark_ext_fini()
 *	De-initializes the DSCP remark conntrack extension, unregistering
 *	the extension type registered by nf_conntrack_dscpremark_ext_init().
 */
void nf_conntrack_dscpremark_ext_fini(void)
{
nf_ct_extend_unregister(&dscpremark_extend);
}

View File

@@ -0,0 +1,92 @@
--- a/arch/arm/boot/dts/qcom-ipq8064-onhub.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064-onhub.dtsi
@@ -10,8 +10,6 @@
/ {
aliases {
- ethernet0 = &gmac0;
- ethernet1 = &gmac2;
mdio-gpio0 = &mdio;
serial0 = &gsbi4_serial;
};
@@ -109,6 +107,10 @@
};
};
+&adm_dma {
+ status = "okay";
+};
+
&qcom_pinmux {
rgmii0_pins: rgmii0_pins {
mux {
@@ -290,9 +292,22 @@
&gmac0 {
status = "okay";
- phy-mode = "rgmii";
- qcom,id = <0>;
- phy-handle = <&phy1>;
+ compatible = "qcom,nss-gmac";
+
+ phy-mode = "rgmii";
+ qcom,id = <0>;
+ qcom,pcs-chanid = <0>;
+ qcom,phy-mdio-addr = <1>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
+ qcom,phy_mii_type = <0>;
+ qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
+ qcom,irq = <252>;
+ local-mac-address = [00 00 00 00 00 00];
+ mdiobus = <&mdio>;
pinctrl-0 = <&rgmii0_pins>;
pinctrl-names = "default";
@@ -305,9 +320,22 @@
&gmac2 {
status = "okay";
- phy-mode = "sgmii";
- qcom,id = <2>;
- phy-handle = <&phy0>;
+ compatible = "qcom,nss-gmac";
+
+ phy-mode = "sgmii";
+ qcom,id = <2>;
+ qcom,pcs-chanid = <1>;
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
+ qcom,phy_mii_type = <1>;
+ qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
+ qcom,irq = <258>;
+ local-mac-address = [00 00 00 00 00 00];
+ mdiobus = <&mdio>;
fixed-link {
speed = <1000>;
@@ -407,8 +435,6 @@
ath10k@0,0 {
reg = <0 0 0 0 0>;
device_type = "pci";
- qcom,ath10k-sa-gpio = <2 3 4 0>;
- qcom,ath10k-sa-gpio-func = <5 5 5 0>;
};
};
};
@@ -426,8 +452,6 @@
ath10k@0,0 {
reg = <0 0 0 0 0>;
device_type = "pci";
- qcom,ath10k-sa-gpio = <2 3 4 0>;
- qcom,ath10k-sa-gpio-func = <5 5 5 0>;
};
};
};

View File

@@ -0,0 +1,628 @@
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -1279,6 +1279,12 @@
snps,blen = <16 0 0 0 0 0 0>;
};
+ nss-gmac-common {
+ compatible = "qcom,nss-gmac-common";
+ reg = <0x03000000 0x0000FFFF 0x1bb00000 0x0000FFFF 0x00900000 0x00004000>;
+ reg-names = "nss_reg_base", "qsgmii_reg_base", "clk_ctl_base";
+ };
+
gmac0: ethernet@37000000 {
device_type = "network";
compatible = "qcom,ipq806x-gmac", "snps,dwmac";
@@ -1489,6 +1495,131 @@
regulator-always-on;
};
+ nss0: nss@40000000 {
+ compatible = "qcom,nss";
+ qcom,low-frequency = <733000000>; /* orig value 110000000 */
+ qcom,mid-frequency = <733000000>; /* orig value 550000000 */
+ qcom,max-frequency = <733000000>;
+
+ interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x36000000 0x1000 0x39000000 0x10000>;
+ reg-names = "nphys", "vphys";
+ clocks = <&gcc NSS_CORE_CLK>, <&gcc NSSTCM_CLK_SRC>,
+ <&gcc NSSTCM_CLK>, <&rpmcc RPM_NSS_FABRIC_0_CLK>,
+ <&rpmcc RPM_NSS_FABRIC_1_CLK>;
+ clock-names = "nss-core-clk", "nss-tcm-src",
+ "nss-tcm-clk", "nss-fab0-clk",
+ "nss-fab1-clk";
+ resets = <&gcc UBI32_CORE1_CLKRST_CLAMP_RESET>,
+ <&gcc UBI32_CORE1_CLAMP_RESET>,
+ <&gcc UBI32_CORE1_AHB_RESET>,
+ <&gcc UBI32_CORE1_AXI_RESET>;
+ reset-names = "clkrst-clamp", "clamp", "ahb", "axi";
+
+ qcom,id = <0>;
+ qcom,num-irq = <2>;
+ qcom,num-queue = <2>;
+ qcom,load-addr = <0x40000000>;
+ qcom,turbo-frequency;
+
+ qcom,bridge-enabled;
+ qcom,gre-enabled;
+ qcom,gre-redir-enabled;
+ qcom,gre_tunnel_enabled;
+ qcom,ipv4-enabled;
+ qcom,ipv4-reasm-enabled;
+ qcom,ipv6-enabled;
+ qcom,ipv6-reasm-enabled;
+ qcom,l2tpv2-enabled;
+ qcom,map-t-enabled;
+ qcom,pppoe-enabled;
+ qcom,pptp-enabled;
+ qcom,portid-enabled;
+ qcom,shaping-enabled;
+ qcom,tun6rd-enabled;
+ qcom,tunipip6-enabled;
+ qcom,vlan-enabled;
+ qcom,wlan-dataplane-offload-enabled;
+ qcom,wlanredirect-enabled;
+ qcom,pxvlan-enabled;
+ qcom,vxlan-enabled;
+ qcom,match-enabled;
+ qcom,mirror-enabled;
+ qcom,rmnet-enabled;
+ qcom,clmap-enabled;
+ };
+
+ nss1: nss@40800000 {
+ compatible = "qcom,nss";
+ qcom,low-frequency = <733000000>; /* orig value 110000000 */
+ qcom,mid-frequency = <733000000>; /* orig value 550000000 */
+ qcom,max-frequency = <733000000>;
+
+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x36400000 0x1000 0x39010000 0x10000>;
+ reg-names = "nphys", "vphys";
+ resets = <&gcc UBI32_CORE2_CLKRST_CLAMP_RESET>,
+ <&gcc UBI32_CORE2_CLAMP_RESET>,
+ <&gcc UBI32_CORE2_AHB_RESET>,
+ <&gcc UBI32_CORE2_AXI_RESET>;
+ reset-names = "clkrst-clamp", "clamp", "ahb", "axi";
+
+ qcom,id = <1>;
+ qcom,num-irq = <2>;
+ qcom,load-addr = <0x40800000>;
+ qcom,num-queue = <2>;
+ qcom,turbo-frequency;
+
+ qcom,capwap-enabled;
+ qcom,crypto-enabled;
+ qcom,dtls-enabled;
+ qcom,ipsec-enabled;
+ };
+
+ crypto1: crypto@38000000 {
+ compatible = "qcom,nss-crypto";
+ reg = <0x38000000 0x20000>, <0x38004000 0x22000>;
+ reg-names = "crypto_pbase", "bam_base";
+ clocks = <&gcc CE5_CORE_CLK>, <&gcc CE5_A_CLK>, <&gcc CE5_H_CLK>;
+ clock-names = "ce5_core", "ce5_aclk", "ce5_hclk";
+ resets = <&gcc CRYPTO_ENG1_RESET>, <&gcc CRYPTO_AHB_RESET>;
+ reset-names = "rst_eng", "rst_ahb";
+ qcom,id = <0>;
+ qcom,ee = <0>;
+ };
+
+ crypto2: crypto@38400000 {
+ compatible = "qcom,nss-crypto";
+ reg = <0x38400000 0x20000>, <0x38404000 0x22000>;
+ reg-names = "crypto_pbase", "bam_base";
+ resets = <&gcc CRYPTO_ENG2_RESET>;
+ reset-names = "rst_eng";
+ qcom,id = <1>;
+ qcom,ee = <0>;
+ };
+
+ crypto3: crypto@38800000 {
+ compatible = "qcom,nss-crypto";
+ reg = <0x38800000 0x20000>, <0x38804000 0x22000>;
+ reg-names = "crypto_pbase", "bam_base";
+ resets = <&gcc CRYPTO_ENG3_RESET>;
+ reset-names = "rst_eng";
+ qcom,id = <2>;
+ qcom,ee = <0>;
+ };
+
+ crypto4: crypto@38c00000 {
+ compatible = "qcom,nss-crypto";
+ reg = <0x38c00000 0x20000>, <0x38c04000 0x22000>;
+ reg-names = "crypto_pbase", "bam_base";
+ resets = <&gcc CRYPTO_ENG4_RESET>;
+ reset-names = "rst_eng";
+ qcom,id = <3>;
+ qcom,ee = <0>;
+ };
+
sdcc1bam: dma@12402000 {
compatible = "qcom,bam-v1.3.0";
reg = <0x12402000 0x8000>;
@@ -1553,6 +1684,20 @@
dmas = <&sdcc3bam 2>, <&sdcc3bam 1>;
dma-names = "tx", "rx";
};
+
+ nss-common {
+ compatible = "qcom,nss-common";
+ reg = <0x03000000 0x00001000>;
+ reg-names = "nss_fpb_base";
+ clocks = <&gcc NSS_CORE_CLK>, <&gcc NSSTCM_CLK>,
+ <&rpmcc RPM_NSS_FABRIC_0_CLK>, <&rpmcc RPM_NSS_FABRIC_1_CLK>;
+ clock-names = "nss_core_clk", "nss_tcm_clk",
+ "nss-fab0-clk", "nss-fab1-clk";
+ nss_core-supply = <&smb208_s1b>;
+ nss_core_vdd_nominal = <1100000>;
+ nss_core_vdd_high = <1150000>;
+ nss_core_threshold_freq = <733000000>;
+ };
};
sfpb_mutex: hwlock@1200600 {
@@ -1563,3 +1708,27 @@
};
};
};
+
+ &gmac1 {
+ compatible = "qcom,nss-gmac";
+ qcom,id = <0>;
+ qcom,pcs-chanid = <0>;
+ qcom,phy_mii_type = <0>;
+ qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
+ mdiobus = <&mdio0>;
+ };
+
+ &gmac2 {
+ compatible = "qcom,nss-gmac";
+ qcom,id = <1>;
+ qcom,pcs-chanid = <1>;
+ qcom,phy_mii_type = <1>;
+ qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
+ mdiobus = <&mdio0>;
+ };
--- a/arch/arm/boot/dts/qcom-ipq8064-v2.0.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064-v2.0.dtsi
@@ -22,6 +22,15 @@
reg = <0x41200000 0x300000>;
no-map;
};
+
+ ramoops@42100000 {
+ compatible = "ramoops";
+ reg = <0x42100000 0x40000>;
+ record-size = <0x4000>;
+ console-size = <0x4000>;
+ ftrace-size = <0x4000>;
+ pmsg-size = <0x4000>;
+ };
};
};
--- a/arch/arm/boot/dts/qcom-ipq8064-eax500.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064-eax500.dtsi
@@ -198,10 +198,17 @@
0x00094 0x4e /* PORT6_STATUS */
>;
};
+
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ };
};
&gmac1 {
status = "okay";
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
phy-mode = "rgmii";
qcom,id = <1>;
@@ -217,6 +224,9 @@
&gmac2 {
status = "okay";
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
phy-mode = "sgmii";
qcom,id = <2>;
--- a/arch/arm/boot/dts/qcom-ipq8064-ea8500.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-ea8500.dts
@@ -111,18 +111,3 @@
reg = <4>;
};
};
-
-&gmac1 {
- qcom,phy_mdio_addr = <4>;
- qcom,poll_required = <1>;
- qcom,rgmii_delay = <0>;
- qcom,emulation = <0>;
-};
-
-/* LAN */
-&gmac2 {
- qcom,phy_mdio_addr = <0>; /* none */
- qcom,poll_required = <0>; /* no polling */
- qcom,rgmii_delay = <0>;
- qcom,emulation = <0>;
-};
--- a/arch/arm/boot/dts/qcom-ipq8064-ad7200-c2600.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064-ad7200-c2600.dtsi
@@ -357,6 +357,9 @@
&gmac1 {
status = "okay";
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
phy-mode = "rgmii";
qcom,id = <1>;
@@ -375,6 +378,9 @@
&gmac2 {
status = "okay";
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
phy-mode = "sgmii";
qcom,id = <2>;
--- a/arch/arm/boot/dts/qcom-ipq8064-r7500.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-r7500.dts
@@ -274,6 +274,9 @@
&gmac1 {
status = "okay";
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
phy-mode = "rgmii";
qcom,id = <1>;
@@ -291,6 +294,9 @@
&gmac2 {
status = "okay";
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
phy-mode = "sgmii";
qcom,id = <2>;
--- a/arch/arm/boot/dts/qcom-ipq8064-r7500v2.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-r7500v2.dts
@@ -357,6 +357,9 @@
&gmac1 {
status = "okay";
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
phy-mode = "rgmii";
qcom,id = <1>;
@@ -374,6 +377,9 @@
&gmac2 {
status = "okay";
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
phy-mode = "sgmii";
qcom,id = <2>;
--- a/arch/arm/boot/dts/qcom-ipq8064-d7800.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-d7800.dts
@@ -360,6 +360,9 @@
&gmac1 {
status = "okay";
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
phy-mode = "rgmii";
qcom,id = <1>;
@@ -377,6 +380,9 @@
&gmac2 {
status = "okay";
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
phy-mode = "sgmii";
qcom,id = <2>;
--- a/arch/arm/boot/dts/qcom-ipq8064-g10.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-g10.dts
@@ -122,12 +122,24 @@
&gmac1 {
status = "okay";
-
- pinctrl-0 = <&rgmii2_pins>;
- pinctrl-names = "default";
-
+ compatible = "qcom,nss-gmac";
+ reg = <0x37200000 0x200000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
phy-mode = "rgmii";
qcom,id = <1>;
+ qcom,pcs-chanid = <0>;
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
+ qcom,phy_mii_type = <0>;
+ qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
+ qcom,irq = <255>;
+ mdiobus = <&mdio0>;
+ pinctrl-0 = <&rgmii2_pins>;
+ pinctrl-names = "default";
fixed-link {
speed = <1000>;
@@ -137,9 +149,22 @@
&gmac2 {
status = "okay";
-
+ compatible = "qcom,nss-gmac";
+ reg = <0x37400000 0x200000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
phy-mode = "sgmii";
qcom,id = <2>;
+ qcom,pcs-chanid = <1>;
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>; /* no polling */
+ qcom,rgmii-delay = <0>;
+ qcom,phy_mii_type = <1>;
+ qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
+ qcom,irq = <258>;
+ mdiobus = <&mdio0>;
fixed-link {
speed = <1000>;
@@ -154,6 +179,9 @@
&mdio0 {
status = "okay";
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
pinctrl-0 = <&mdio0_pins>;
pinctrl-names = "default";
@@ -170,10 +198,14 @@
0x00094 0x4e /* PORT6_STATUS */
>;
};
+
};
&nand {
status = "okay";
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
nand@0 {
reg = <0>;
@@ -207,6 +239,7 @@
qcom,ath10k-calibration-variant = "ASRock-G10";
};
};
+
};
&pcie1 {
--- a/arch/arm/boot/dts/qcom-ipq8065.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8065.dtsi
@@ -95,3 +95,15 @@
opp-level = <2>;
};
};
+
+ &nss0 {
+ qcom,low-frequency = <800000000>;
+ qcom,mid-frequency = <800000000>;
+ qcom,max-frequency = <800000000>;
+ };
+
+ &nss1 {
+ qcom,low-frequency = <800000000>;
+ qcom,mid-frequency = <800000000>;
+ qcom,max-frequency = <800000000>;
+ };
--- a/arch/arm/boot/dts/qcom-ipq8065-nighthawk.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8065-nighthawk.dtsi
@@ -344,14 +344,23 @@
&gmac1 {
status = "okay";
-
+ compatible = "qcom,nss-gmac";
+ reg = <0x37200000 0x200000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
phy-mode = "rgmii";
qcom,id = <1>;
+ qcom,pcs-chanid = <0>;
qcom,phy_mdio_addr = <4>;
qcom,poll_required = <0>;
qcom,rgmii_delay = <1>;
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
qcom,phy_mii_type = <0>;
qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
qcom,irq = <255>;
mdiobus = <&mdio0>;
@@ -369,14 +378,23 @@
&gmac2 {
status = "okay";
-
+ compatible = "qcom,nss-gmac";
+ reg = <0x37400000 0x200000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
phy-mode = "sgmii";
qcom,id = <2>;
+ qcom,pcs-chanid = <1>;
qcom,phy_mdio_addr = <0>; /* none */
qcom,poll_required = <0>; /* no polling */
qcom,rgmii_delay = <0>;
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
qcom,phy_mii_type = <1>;
qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
qcom,irq = <258>;
mdiobus = <&mdio0>;
--- a/arch/arm/boot/dts/qcom-ipq8064-ea7500-v1.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-ea7500-v1.dts
@@ -89,3 +89,16 @@
reg = <0x6080000 0x1f80000>;
};
};
+
+&gmac1 {
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
+};
+
+/* LAN */
+&gmac2 {
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
+};
--- a/arch/arm/boot/dts/qcom-ipq8065-rt4230w-rev6.dts
+++ b/arch/arm/boot/dts/qcom-ipq8065-rt4230w-rev6.dts
@@ -311,10 +311,28 @@
0x00054 0xc832c832 /* LED_CTRL_1 */
>;
};
+
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ qca,ar8327-initvals = <
+ 0x000e4 0x6a545 /* MAC_POWER_SEL */
+ 0x0000c 0x80 /* PAD6_MODE */
+ >;
+ };
};
&gmac0 {
status = "okay";
+ compatible = "qcom,nss-gmac";
+ qcom,id = <0>;
+ qcom,pcs-chanid = <0>;
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ mdiobus = <&mdio0>;
+ qcom,socver = <0>;
phy-mode = "rgmii";
qcom,id = <0>;
@@ -332,6 +350,16 @@
&gmac1 {
status = "okay";
+ compatible = "qcom,nss-gmac";
+ qcom,id = <1>;
+ qcom,pcs-chanid = <1>;
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ mdiobus = <&mdio0>;
+ qcom,socver = <0>;
phy-mode = "sgmii";
qcom,id = <1>;
--- a/arch/arm/boot/dts/qcom-ipq8065-nbg6817.dts
+++ b/arch/arm/boot/dts/qcom-ipq8065-nbg6817.dts
@@ -284,13 +284,23 @@
&gmac1 {
status = "okay";
+ compatible = "qcom,nss-gmac";
+ reg = <0x37200000 0x200000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
phy-mode = "rgmii";
qcom,id = <1>;
+ qcom,pcs-chanid = <0>;
qcom,phy_mdio_addr = <4>;
qcom,poll_required = <0>;
qcom,rgmii_delay = <1>;
+ qcom,phy-mdio-addr = <4>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <1>;
qcom,phy_mii_type = <0>;
qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
qcom,irq = <255>;
mdiobus = <&mdio0>;
@@ -305,13 +315,23 @@
&gmac2 {
status = "okay";
+ compatible = "qcom,nss-gmac";
+ reg = <0x37400000 0x200000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
phy-mode = "sgmii";
qcom,id = <2>;
+ qcom,pcs-chanid = <1>;
qcom,phy_mdio_addr = <0>; /* none */
qcom,poll_required = <0>; /* no polling */
qcom,rgmii_delay = <0>;
+ qcom,phy-mdio-addr = <0>;
+ qcom,poll-required = <0>;
+ qcom,rgmii-delay = <0>;
qcom,phy_mii_type = <1>;
qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
qcom,irq = <258>;
mdiobus = <&mdio0>;

View File

@@ -0,0 +1,79 @@
--- a/arch/arm/boot/dts/qcom-ipq8064-ap148.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-ap148.dts
@@ -1,4 +1,4 @@
-#include "qcom-ipq8064-v1.0.dtsi"
+#include "qcom-ipq8064-v1.0-smb208.dtsi"
/ {
model = "Qualcomm Technologies, Inc. IPQ8064/AP-148";
--- a/arch/arm/boot/dts/qcom-ipq8064-ap161.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-ap161.dts
@@ -1,4 +1,4 @@
-#include "qcom-ipq8064-v1.0.dtsi"
+#include "qcom-ipq8064-v1.0-smb208.dtsi"
/ {
model = "Qualcomm IPQ8064/AP161";
--- a/arch/arm/boot/dts/qcom-ipq8064-db149.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-db149.dts
@@ -1,4 +1,4 @@
-#include "qcom-ipq8064-v1.0.dtsi"
+#include "qcom-ipq8064-v1.0-smb208.dtsi"
/ {
model = "Qualcomm IPQ8064/DB149";
--- a/arch/arm/boot/dts/qcom-ipq8064-r7500.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-r7500.dts
@@ -1,4 +1,4 @@
-#include "qcom-ipq8064-v1.0.dtsi"
+#include "qcom-ipq8064-v1.0-smb208.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/soc/qcom,tcsr.h>
--- a/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include "qcom-ipq8064.dtsi"
+#include "qcom-ipq8064-smb208.dtsi"
#include <dt-bindings/input/input.h>
/ {
--- a/arch/arm/boot/dts/qcom-ipq8064-wpq864.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-wpq864.dts
@@ -5,7 +5,7 @@
* All rights reserved.
*/
-#include "qcom-ipq8064-v1.0.dtsi"
+#include "qcom-ipq8064-v1.0-smb208.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/soc/qcom,tcsr.h>
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-ipq8064-v1.0-smb208.dtsi
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "qcom-ipq8064-v1.0.dtsi"
+
+&rpm {
+ smb208_regulators: regulators {
+ compatible = "qcom,rpm-smb208-regulators";
+
+ smb208_s1a: s1a {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1150000>;
+
+ qcom,switch-mode-frequency = <1200000>;
+ };
+
+ smb208_s1b: s1b {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1150000>;
+
+ qcom,switch-mode-frequency = <1200000>;
+ };
+
+ };
+};

View File

@@ -0,0 +1,215 @@
From c70758d96b22e4421a6afd824cb59e350c6a8040 Mon Sep 17 00:00:00 2001
From: Robert Marko <robert.marko@sartura.hr>
Date: Tue, 2 Jun 2020 22:09:15 +0200
Subject: [PATCH] Regulator: Add NSS VOLT
Signed-off-by: Robert Marko <robert.marko@sartura.hr>
---
drivers/regulator/Kconfig | 7 +++++++
drivers/regulator/Makefile | 1 +
2 files changed, 8 insertions(+)
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1423,5 +1423,12 @@ config REGULATOR_QCOM_LABIBB
boost regulator and IBB can be used as a negative boost regulator
for LCD display panel.
+config REGULATOR_NSS_VOLT
+ bool "Qualcomm IPQ806X NSS Voltage regulator"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ This driver provides support for the Qualcomm IPQ806X NSS Voltage
+ regulator.
+
endif
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -171,5 +171,6 @@ obj-$(CONFIG_REGULATOR_WM831X) += wm831x
obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
+obj-$(CONFIG_REGULATOR_NSS_VOLT) += nss-volt-ipq806x.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
--- /dev/null
+++ b/drivers/regulator/nss-volt-ipq806x.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/nss-volt-ipq806x.h>
+
+struct nss_data {
+ struct regulator *nss_reg;
+ u32 nss_core_vdd_nominal;
+ u32 nss_core_vdd_high;
+ u32 nss_core_threshold_freq;
+};
+
+static struct nss_data *data;
+
+int nss_ramp_voltage(unsigned long rate, bool ramp_up)
+{
+ int ret;
+ int curr_uV, uV;
+ struct regulator *reg;
+
+ if (!data) {
+ pr_err("NSS core regulator not init.\n");
+ return -ENODEV;
+ }
+
+ reg = data->nss_reg;
+
+ if (!reg) {
+ pr_err("NSS core regulator not found.\n");
+ return -EINVAL;
+ }
+
+ uV = data->nss_core_vdd_nominal;
+ if (rate >= data->nss_core_threshold_freq)
+ uV = data->nss_core_vdd_high;
+
+ curr_uV = regulator_get_voltage(reg);
+
+ if (ramp_up) {
+ if (uV <= curr_uV)
+ return 0;
+ } else {
+ if (uV >= curr_uV)
+ return 0;
+ }
+
+ ret = regulator_set_voltage(reg, uV, data->nss_core_vdd_high);
+ if (ret)
+ pr_err("NSS volt scaling failed (%d)\n", uV);
+
+ return ret;
+}
+
+static const struct of_device_id nss_ipq806x_match_table[] = {
+ { .compatible = "qcom,nss-common" },
+ {}
+};
+
+static int nss_volt_ipq806x_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->nss_reg = devm_regulator_get(&pdev->dev, "nss_core");
+ ret = PTR_ERR_OR_ZERO(data->nss_reg);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev,
+ "nss_core regulator not ready, retry\n");
+ else
+ dev_err(&pdev->dev, "no regulator for nss_core: %d\n",
+ ret);
+
+ return ret;
+ }
+
+ if (of_property_read_u32(np, "nss_core_vdd_nominal",
+ &data->nss_core_vdd_nominal)) {
+ pr_warn("NSS core vdd nominal not found. Using defaults...\n");
+ data->nss_core_vdd_nominal = 1100000;
+ }
+
+ if (of_property_read_u32(np, "nss_core_vdd_high",
+ &data->nss_core_vdd_high)) {
+ pr_warn("NSS core vdd high not found. Using defaults...\n");
+ data->nss_core_vdd_high = 1150000;
+ }
+
+ if (of_property_read_u32(np, "nss_core_threshold_freq",
+ &data->nss_core_threshold_freq)) {
+ pr_warn("NSS core thres freq not found. Using defaults...\n");
+ data->nss_core_threshold_freq = 733000000;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static struct platform_driver nss_ipq806x_driver = {
+ .probe = nss_volt_ipq806x_probe,
+ .driver = {
+ .name = "nss-volt-ipq806x",
+ .owner = THIS_MODULE,
+ .of_match_table = nss_ipq806x_match_table,
+ },
+};
+
+static int __init nss_ipq806x_init(void)
+{
+ return platform_driver_register(&nss_ipq806x_driver);
+}
+late_initcall(nss_ipq806x_init);
+
+static void __exit nss_ipq806x_exit(void)
+{
+ platform_driver_unregister(&nss_ipq806x_driver);
+}
+module_exit(nss_ipq806x_exit);
+
--- /dev/null
+++ b/include/linux/regulator/nss-volt-ipq806x.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __QCOM_NSS_VOL_SCALING_H
+#define __QCOM_NSS_VOL_SCALING_H
+
+#include <linux/regulator/consumer.h>
+
+int nss_ramp_voltage(unsigned long rate, bool ramp_up);
+
+#endif

View File

@@ -0,0 +1,144 @@
From 733a75729c1fbb478caaed875dd9c09a878a553d Mon Sep 17 00:00:00 2001
From: Robert Marko <robimarko@gmail.com>
Date: Fri, 5 Jun 2020 11:44:27 +0200
Subject: [PATCH] Revert "ARM: dma-mapping: remove dmac_clean_range and
dmac_inv_range"
This partially reverts 'commit 702b94bff3c505 ("ARM: dma-mapping:
remove dmac_clean_range and dmac_inv_range")'
Some MSM drivers still use the dmac_clean and dmac_inv_range APIs.
Bring back the defines and exports for v7 CPUs.
Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
Signed-off-by: Abhimanyu Kapur <abhimany@codeaurora.org>
[sramana: resolved minor merge conflicts]
Signed-off-by: Srinivas Ramana <sramana@codeaurora.org>
(cherry picked from commit d6118c0a9f7ab2b131ca36dd3dbd5634603d14fe)
Change-Id: Ib2ddb4452711c5c2013bf29f0b5d8a3572b10357
Signed-off-by: Manoharan Vijaya Raghavan <mraghava@codeaurora.org>
Signed-off-by: Robert Marko <robimarko@gmail.com>
---
arch/arm/include/asm/cacheflush.h | 21 +++++++++++++++++++++
arch/arm/include/asm/glue-cache.h | 2 ++
arch/arm/mm/cache-v7.S | 6 ++++--
arch/arm/mm/proc-macros.S | 2 ++
arch/arm/mm/proc-syms.c | 3 +++
5 files changed, 32 insertions(+), 2 deletions(-)
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -91,6 +91,21 @@
* DMA Cache Coherency
* ===================
*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -112,6 +127,8 @@ struct cpu_cache_fns {
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
+ void (*dma_inv_range)(const void *, const void *);
+ void (*dma_clean_range)(const void *, const void *);
void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;
@@ -137,6 +154,8 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
+#define dmac_inv_range cpu_cache.dma_inv_range
+#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -156,6 +175,8 @@ extern void __cpuc_flush_dcache_area(voi
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);
#endif
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -156,6 +156,8 @@ static inline void nop_dma_unmap_area(co
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+#define dmac_inv_range __glue(_CACHE, _dma_inv_range)
+#define dmac_clean_range __glue(_CACHE, _dma_clean_range)
#endif
#endif
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -363,7 +363,7 @@ ENDPROC(v7_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_inv_range:
+ENTRY(v7_dma_inv_range)
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
@@ -393,7 +393,7 @@ ENDPROC(v7_dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_clean_range:
+ENTRY(v7_dma_clean_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -479,6 +479,8 @@ ENDPROC(v7_dma_unmap_area)
globl_equ b15_dma_map_area, v7_dma_map_area
globl_equ b15_dma_unmap_area, v7_dma_unmap_area
+ globl_equ b15_dma_inv_range, v7_dma_inv_range
+ globl_equ b15_dma_clean_range, v7_dma_clean_range
globl_equ b15_dma_flush_range, v7_dma_flush_range
define_cache_functions b15
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -334,6 +334,8 @@ ENTRY(\name\()_cache_fns)
.long \name\()_flush_kern_dcache_area
.long \name\()_dma_map_area
.long \name\()_dma_unmap_area
+ .long \name\()_dma_inv_range
+ .long \name\()_dma_clean_range
.long \name\()_dma_flush_range
.size \name\()_cache_fns, . - \name\()_cache_fns
.endm
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,6 +27,9 @@ EXPORT_SYMBOL(__cpuc_flush_user_all);
EXPORT_SYMBOL(__cpuc_flush_user_range);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
EXPORT_SYMBOL(__cpuc_flush_dcache_area);
+EXPORT_SYMBOL(dmac_inv_range);
+EXPORT_SYMBOL(dmac_clean_range);
+EXPORT_SYMBOL(dmac_flush_range);
#else
EXPORT_SYMBOL(cpu_cache);
#endif

View File

@@ -0,0 +1,208 @@
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -283,6 +283,7 @@
#define EBI2_AON_CLK 281
#define NSSTCM_CLK_SRC 282
#define NSSTCM_CLK 283
+#define NSS_CORE_CLK 284 /* Virtual */
#define CE5_A_CLK_SRC 285
#define CE5_H_CLK_SRC 286
#define CE5_CORE_CLK_SRC 287
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -24,6 +24,10 @@
#include "clk-branch.h"
#include "clk-hfpll.h"
#include "reset.h"
+#include <linux/regulator/nss-volt-ipq806x.h>
+
+/* NSS safe parent index which will be used during NSS PLL rate change */
+static int gcc_ipq806x_nss_safe_parent;
static const struct clk_parent_data gcc_pxo[] = {
{ .fw_name = "pxo", .name = "pxo" },
@@ -3061,6 +3065,139 @@ static struct clk_branch ce5_h_clk = {
},
};
+static int nss_core_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int ret;
+
+ /*
+ * When ramping up voltage, it needs to be done first. This ensures that
+ * the volt required will be available when you step up the frequency.
+ */
+ ret = nss_ramp_voltage(rate, true);
+ if (ret)
+ return ret;
+
+ ret = clk_dyn_rcg_ops.set_rate(&ubi32_core1_src_clk.clkr.hw, rate,
+ parent_rate);
+ if (ret)
+ return ret;
+
+ ret = clk_dyn_rcg_ops.set_rate(&ubi32_core2_src_clk.clkr.hw, rate,
+ parent_rate);
+
+ if (ret)
+ return ret;
+
+ /*
+ * When ramping down voltage, it needs to be set first. This ensures
+ * that the volt required will be available until you step down the
+ * frequency.
+ */
+ ret = nss_ramp_voltage(rate, false);
+
+ return ret;
+}
+
+static int
+nss_core_clk_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ int ret;
+
+ /*
+ * When ramping up voltage needs to be done first. This ensures that
+ * the voltage required will be available when you step up the
+ * frequency.
+ */
+ ret = nss_ramp_voltage(rate, true);
+ if (ret)
+ return ret;
+
+ ret = clk_dyn_rcg_ops.set_rate_and_parent(
+ &ubi32_core1_src_clk.clkr.hw, rate, parent_rate, index);
+ if (ret)
+ return ret;
+
+ ret = clk_dyn_rcg_ops.set_rate_and_parent(
+ &ubi32_core2_src_clk.clkr.hw, rate, parent_rate, index);
+
+ if (ret)
+ return ret;
+
+ /*
+ * When ramping down voltage needs to be done last. This ensures that
+ * the voltage required will be available when you step down the
+ * frequency.
+ */
+ ret = nss_ramp_voltage(rate, false);
+
+ return ret;
+}
+
+static int nss_core_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_dyn_rcg_ops.determine_rate(&ubi32_core1_src_clk.clkr.hw,
+ req);
+}
+
+static unsigned long
+nss_core_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ return clk_dyn_rcg_ops.recalc_rate(&ubi32_core1_src_clk.clkr.hw,
+ parent_rate);
+}
+
+static u8 nss_core_clk_get_parent(struct clk_hw *hw)
+{
+ return clk_dyn_rcg_ops.get_parent(&ubi32_core1_src_clk.clkr.hw);
+}
+
+static int nss_core_clk_set_parent(struct clk_hw *hw, u8 i)
+{
+ int ret;
+ struct clk_dyn_rcg *rcg;
+ struct freq_tbl f = { 200000000, P_PLL0, 2, 1, 2 };
+
+ /* P_PLL0 is 800 Mhz which needs to be divided for 200 Mhz */
+ if (i == gcc_ipq806x_nss_safe_parent) {
+ rcg = to_clk_dyn_rcg(&ubi32_core1_src_clk.clkr.hw);
+ clk_dyn_configure_bank(rcg, &f);
+
+ rcg = to_clk_dyn_rcg(&ubi32_core2_src_clk.clkr.hw);
+ clk_dyn_configure_bank(rcg, &f);
+
+ return 0;
+ }
+
+ ret = clk_dyn_rcg_ops.set_parent(&ubi32_core1_src_clk.clkr.hw, i);
+ if (ret)
+ return ret;
+
+ return clk_dyn_rcg_ops.set_parent(&ubi32_core2_src_clk.clkr.hw, i);
+}
+
+static const struct clk_ops clk_ops_nss_core = {
+ .set_rate = nss_core_clk_set_rate,
+ .set_rate_and_parent = nss_core_clk_set_rate_and_parent,
+ .determine_rate = nss_core_clk_determine_rate,
+ .recalc_rate = nss_core_clk_recalc_rate,
+ .get_parent = nss_core_clk_get_parent,
+ .set_parent = nss_core_clk_set_parent,
+};
+
+/* Virtual clock for nss core clocks */
+static struct clk_regmap nss_core_clk = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_core_clk",
+ .ops = &clk_ops_nss_core,
+ .parent_data = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap *gcc_ipq806x_clks[] = {
[PLL0] = &pll0.clkr,
[PLL0_VOTE] = &pll0_vote,
@@ -3180,6 +3317,7 @@ static struct clk_regmap *gcc_ipq806x_cl
[UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
[NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
[NSSTCM_CLK] = &nss_tcm_clk.clkr,
+ [NSS_CORE_CLK] = &nss_core_clk,
[PLL9] = &hfpll0.clkr,
[PLL10] = &hfpll1.clkr,
[PLL12] = &hfpll_l2.clkr,
@@ -3400,6 +3538,12 @@ static int gcc_ipq806x_probe(struct plat
if (!regmap)
return -ENODEV;
+ gcc_ipq806x_nss_safe_parent = qcom_find_src_index(&nss_core_clk.hw,
+ gcc_pxo_pll8_pll14_pll18_pll0_map,
+ P_PLL0);
+ if (gcc_ipq806x_nss_safe_parent < 0)
+ return gcc_ipq806x_nss_safe_parent;
+
/* Setup PLL18 static bits */
regmap_update_bits(regmap, 0x31a4, 0xffffffc0, 0x40000400);
regmap_write(regmap, 0x31b0, 0x3080);
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -818,6 +818,11 @@ static int clk_dyn_rcg_set_rate_and_pare
return __clk_dyn_rcg_set_rate(hw, rate);
}
+void clk_dyn_configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
+{
+ configure_bank(rcg, f);
+}
+
const struct clk_ops clk_rcg_ops = {
.enable = clk_enable_regmap,
.disable = clk_disable_regmap,
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -184,4 +184,7 @@ struct clk_rcg_dfs_data {
extern int qcom_cc_register_rcg_dfs(struct regmap *regmap,
const struct clk_rcg_dfs_data *rcgs,
size_t len);
+
+extern void clk_dyn_configure_bank(struct clk_dyn_rcg *rcg,
+ const struct freq_tbl *f);
#endif

View File

@@ -0,0 +1,11 @@
--- a/net/core/of_net.c
+++ b/net/core/of_net.c
@@ -39,7 +39,7 @@ int of_get_phy_mode(struct device_node *
for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
if (!strcasecmp(pm, phy_modes(i))) {
*interface = i;
- return 0;
+ return i;
}
return -ENODEV;

View File

@@ -0,0 +1,46 @@
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -230,6 +230,7 @@ static const struct soc_id soc_id[] = {
{ 198, "MSM8126" },
{ 199, "APQ8026" },
{ 200, "MSM8926" },
+ { 202, "IPQ8064" },
{ 205, "MSM8326" },
{ 206, "MSM8916" },
{ 207, "MSM8994" },
@@ -414,11 +415,13 @@ QCOM_OPEN(pmic_die_rev, qcom_show_pmic_d
QCOM_OPEN(chip_id, qcom_show_chip_id);
#define DEFINE_IMAGE_OPS(type) \
-static int show_image_##type(struct seq_file *seq, void *p) \
+static int show_image_##type(struct seq_file *seq, void *p) \
{ \
struct smem_image_version *image_version = seq->private; \
- if (image_version->type[0] != '\0') \
- seq_printf(seq, "%s\n", image_version->type); \
+ if (image_version && image_version->type[0] != '\0') { \
+ seq_puts(seq, image_version->type); \
+ seq_puts(seq, "\n"); \
+ } \
return 0; \
} \
static int open_image_##type(struct inode *inode, struct file *file) \
@@ -606,7 +609,7 @@ static int qcom_socinfo_probe(struct pla
if (!qs)
return -ENOMEM;
- qs->attr.family = "Snapdragon";
+ qs->attr.family = "IPQ";
qs->attr.machine = socinfo_machine(&pdev->dev,
le32_to_cpu(info->id));
qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u",
@@ -623,6 +626,9 @@ static int qcom_socinfo_probe(struct pla
if (IS_ERR(qs->soc_dev))
return PTR_ERR(qs->soc_dev);
+ pr_info("CPU: %s, SoC Version: %s id: %u fmt: %x\n", qs->attr.machine,
+ qs->attr.revision, le32_to_cpu(info->id), qs->info.fmt);
+
socinfo_debugfs_init(qs, info, item_size);
/* Feed the soc specific unique data into entropy pool */

View File

@@ -0,0 +1,54 @@
--- a/arch/arm/boot/dts/qcom-ipq8064-wg2600hp.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-wg2600hp.dts
@@ -161,9 +161,25 @@
&gmac1 {
status = "okay";
+ compatible = "qcom,nss-gmac";
+ reg = <0x37200000 0x200000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+
phy-mode = "rgmii";
qcom,id = <1>;
+ qcom,pcs-chanid = <0>;
+ qcom,phy-mdio-addr = <4>; /* AKRO: 4->1 */
+ qcom,poll-required = <0>; /* AKRO: 0->1 */
+ qcom,rgmii-delay = <1>; /* AKRO: 1->0 */
+ qcom,phy_mii_type = <0>; /* AKRO: 0->8(PHY_INTERFACE_MODE_RGMII) */
+ qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
+ qcom,irq = <255>;
+ mdiobus = <&mdio0>;
+
pinctrl-0 = <&rgmii2_pins>;
pinctrl-names = "default";
@@ -179,9 +195,25 @@
&gmac2 {
status = "okay";
+ compatible = "qcom,nss-gmac";
+ reg = <0x37400000 0x200000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
+
phy-mode = "sgmii";
qcom,id = <2>;
+ qcom,pcs-chanid = <1>;
+ qcom,phy-mdio-addr = <0>; /* AKRO: 0->0 */
+ qcom,poll-required = <0>; /* no polling */
+ qcom,rgmii-delay = <0>;
+ qcom,phy_mii_type = <1>; /* AKRO: 1->4(PHY_INTERFACE_MODE_SGMII) */
+ qcom,emulation = <0>;
+ qcom,forced-speed = <1000>;
+ qcom,forced-duplex = <1>;
+ qcom,socver = <0>;
+ qcom,irq = <258>;
+ mdiobus = <&mdio0>;
+
nvmem-cells = <&macaddr_PRODUCTDATA_0>;
nvmem-cell-names = "mac-address";

View File

@@ -0,0 +1,759 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -68,6 +68,7 @@ void brioctl_set(int (*hook)(struct net
void __user *uarg));
int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
struct ifreq *ifr, void __user *uarg);
+extern bool br_is_hairpin_enabled(struct net_device *dev);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
@@ -191,4 +192,42 @@ static inline clock_t br_get_ageing_time
}
#endif
+/* QCA NSS ECM support - Start */
+extern struct net_device *br_port_dev_get(struct net_device *dev,
+ unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie);
+extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr);
+extern void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid);
+extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr,
+ __u16 vid);
+extern void br_fdb_update_register_notify(struct notifier_block *nb);
+extern void br_fdb_update_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_port_dev_get_hook_t(struct net_device *dev,
+ struct sk_buff *skb,
+ unsigned char *addr,
+ unsigned int cookie);
+extern br_port_dev_get_hook_t __rcu *br_port_dev_get_hook;
+
+#define BR_FDB_EVENT_ADD 0x01
+#define BR_FDB_EVENT_DEL 0x02
+
+struct br_fdb_event {
+ struct net_device *dev;
+ unsigned char addr[6];
+ unsigned char is_local;
+ struct net_bridge *br;
+ struct net_device *orig_dev;
+};
+extern void br_fdb_register_notify(struct notifier_block *nb);
+extern void br_fdb_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_get_dst_hook_t(
+ const struct net_bridge_port *src,
+ struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+/* QCA NSS ECM support - End */
+
#endif
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -230,7 +230,12 @@ extern void vlan_vids_del_by_dev(struct
extern bool vlan_uses_dev(const struct net_device *dev);
+/* QCA NSS ECM support - Start */
+extern struct net_device *vlan_dev_next_dev(const struct net_device *dev);
+/* QCA NSS ECM support - End */
+
#else
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1724,6 +1724,27 @@ enum netdev_ml_priv_type {
ML_PRIV_CAN,
};
+/* QCA NSS ECM support - Start */
+enum netdev_priv_flags_ext {
+ IFF_EXT_TUN_TAP = 1<<0,
+ IFF_EXT_PPP_L2TPV2 = 1<<1,
+ IFF_EXT_PPP_L2TPV3 = 1<<2,
+ IFF_EXT_PPP_PPTP = 1<<3,
+ IFF_EXT_GRE_V4_TAP = 1<<4,
+ IFF_EXT_GRE_V6_TAP = 1<<5,
+ IFF_EXT_IFB = 1<<6,
+};
+
+#define IFF_EXT_TUN_TAP IFF_EXT_TUN_TAP
+#define IFF_EXT_PPP_L2TPV2 IFF_EXT_PPP_L2TPV2
+#define IFF_EXT_PPP_L2TPV3 IFF_EXT_PPP_L2TPV3
+#define IFF_EXT_PPP_PPTP IFF_EXT_PPP_PPTP
+#define IFF_EXT_GRE_V4_TAP IFF_EXT_GRE_V4_TAP
+#define IFF_EXT_GRE_V6_TAP IFF_EXT_GRE_V6_TAP
+#define IFF_EXT_IFB IFF_EXT_IFB
+/* QCA NSS ECM support - End */
+
+
/**
* struct net_device - The DEVICE structure.
*
@@ -2855,6 +2876,10 @@ enum netdev_cmd {
NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO,
+ /* QCA NSS ECM Support - Start */
+ NETDEV_BR_JOIN,
+ NETDEV_BR_LEAVE,
+ /* QCA NSS ECM Support - End */
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -211,6 +211,11 @@ void rt6_multipath_rebalance(struct fib6
void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb);
+int rt6_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -567,4 +567,15 @@ static inline void neigh_update_is_route
*notify = 1;
}
}
+
+/* QCA NSS ECM support - Start */
+struct neigh_mac_update {
+ unsigned char old_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+ unsigned char update_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+};
+
+extern void neigh_mac_update_register_notify(struct notifier_block *nb);
+extern void neigh_mac_update_unregister_notify(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
#endif
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -234,6 +234,11 @@ struct rtable *rt_dst_alloc(struct net_d
bool nopolicy, bool noxfrm);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb);
+int ip_rt_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -563,4 +563,12 @@ static int __init vlan_offload_init(void
return 0;
}
+/* QCA NSS ECM support - Start */
+struct net_device *vlan_dev_next_dev(const struct net_device *dev)
+{
+ return vlan_dev_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_next_dev);
+/* QCA NSS ECM support - End */
+
fs_initcall(vlan_offload_init);
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -37,6 +37,35 @@ static int fdb_insert(struct net_bridge
static void fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *, int, bool);
+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(br_fdb_notifier_list);
+ATOMIC_NOTIFIER_HEAD(br_fdb_update_notifier_list);
+
+void br_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_register_notify);
+
+void br_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_unregister_notify);
+
+void br_fdb_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_register_notify);
+
+void br_fdb_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -342,6 +371,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
+ u8 mac_addr[6]; /* QCA NSS ECM support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -368,8 +398,15 @@ void br_fdb_cleanup(struct work_struct *
work_delay = min(work_delay, this_timer - now);
} else {
spin_lock_bh(&br->hash_lock);
- if (!hlist_unhashed(&f->fdb_node))
+ if (!hlist_unhashed(&f->fdb_node)) {
+ ether_addr_copy(mac_addr, f->key.addr.addr);
fdb_delete(br, f, true);
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list, 0,
+ (void *)mac_addr);
+ /* QCA NSS ECM support - End */
+ }
spin_unlock_bh(&br->hash_lock);
}
}
@@ -615,6 +652,12 @@ void br_fdb_update(struct net_bridge *br
&fdb->flags)))
clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
&fdb->flags);
+
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list,
+ 0, (void *)addr);
+ /* QCA NSS ECM support - End */
}
if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
@@ -794,6 +837,25 @@ static void fdb_notify(struct net_bridge
struct sk_buff *skb;
int err = -ENOBUFS;
+ /* QCA NSS ECM support - Start */
+ if (fdb->dst) {
+ int event;
+ struct br_fdb_event fdb_event;
+
+ if (type == RTM_NEWNEIGH)
+ event = BR_FDB_EVENT_ADD;
+ else
+ event = BR_FDB_EVENT_DEL;
+
+ fdb_event.dev = fdb->dst->dev;
+ ether_addr_copy(fdb_event.addr, fdb->key.addr.addr);
+ fdb_event.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
+ atomic_notifier_call_chain(&br_fdb_notifier_list,
+ event,
+ (void *)&fdb_event);
+ }
+ /* QCA NSS ECM support - End */
+
if (swdev_notify)
br_switchdev_fdb_notify(br, fdb, type);
@@ -1377,3 +1439,62 @@ void br_fdb_clear_offload(const struct n
spin_unlock_bh(&p->br->hash_lock);
}
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
+
+/* QCA NSS ECM support - Start */
+/* Refresh FDB entries for bridge packets being forwarded by offload engines */
+void br_refresh_fdb_entry(struct net_device *dev, const char *addr)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ if (!is_valid_ether_addr(addr)) {
+ pr_info("bridge: Attempt to refresh with invalid ether address %pM\n",
+ addr);
+ return;
+ }
+
+ rcu_read_lock();
+ br_fdb_update(p->br, p, addr, 0, true);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_refresh_fdb_entry);
+
+/* Update timestamp of FDB entries for bridge packets being forwarded by offload engines */
+void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ if (likely(fdb)) {
+ fdb->updated = jiffies;
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_fdb_entry_refresh);
+
+/* Look up the MAC address in the device's bridge fdb table */
+struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr, __u16 vid)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+ struct net_bridge_fdb_entry *fdb;
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return NULL;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ rcu_read_unlock();
+
+ return fdb;
+}
+EXPORT_SYMBOL_GPL(br_fdb_has_entry);
+/* QCA NSS ECM support - End */
+
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -18,6 +18,7 @@
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
+#include <linux/version.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
@@ -26,6 +27,12 @@
#include "br_private.h"
+/* QCA NSS ECM support - Start */
+/* Hook for external forwarding logic */
+br_port_dev_get_hook_t __rcu *br_port_dev_get_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_port_dev_get_hook);
+/* QCA NSS ECM support - End */
+
/*
* Determine initial path cost based on speed.
* using recommendations from 802.1d standard
@@ -707,6 +714,8 @@ int br_add_if(struct net_bridge *br, str
kobject_uevent(&p->kobj, KOBJ_ADD);
+ call_netdevice_notifiers(NETDEV_BR_JOIN, dev); /* QCA NSS ECM support */
+
return 0;
err6:
@@ -742,6 +751,8 @@ int br_del_if(struct net_bridge *br, str
if (!p || p->br != br)
return -EINVAL;
+ call_netdevice_notifiers(NETDEV_BR_LEAVE, dev); /* QCA NSS ECM support */
+
/* Since more than one interface can be attached to a bridge,
* there still maybe an alternate path for netconsole to use;
* therefore there is no reason for a NETDEV_RELEASE event.
@@ -785,3 +796,75 @@ bool br_port_flag_is_set(const struct ne
return p->flags & flag;
}
EXPORT_SYMBOL_GPL(br_port_flag_is_set);
+
+/* QCA NSS ECM support - Start */
+/* API to know if hairpin feature is enabled/disabled on this bridge port */
+bool br_is_hairpin_enabled(struct net_device *dev)
+{
+ struct net_bridge_port *port = br_port_get_check_rcu(dev);
+
+ if (likely(port))
+ return port->flags & BR_HAIRPIN_MODE;
+ return false;
+}
+EXPORT_SYMBOL_GPL(br_is_hairpin_enabled);
+
+/* br_port_dev_get()
+ * If a skb is provided, and the br_port_dev_get_hook_t hook exists,
+ * use that to try and determine the egress port for that skb.
+ * If not, or no egress port could be determined, use the given addr
+ * to identify the port to which it is reachable,
+ * returing a reference to the net device associated with that port.
+ *
+ * NOTE: Return NULL if given dev is not a bridge or the mac has no
+ * associated port.
+ */
+struct net_device *br_port_dev_get(struct net_device *dev, unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie)
+{
+ struct net_bridge_fdb_entry *fdbe;
+ struct net_bridge *br;
+ struct net_device *netdev = NULL;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return NULL;
+
+ rcu_read_lock();
+
+ /* If the hook exists and the skb isn't NULL, try and get the port */
+ if (skb) {
+ br_port_dev_get_hook_t *port_dev_get_hook;
+
+ port_dev_get_hook = rcu_dereference(br_port_dev_get_hook);
+ if (port_dev_get_hook) {
+ struct net_bridge_port *pdst =
+ __br_get(port_dev_get_hook, NULL, dev, skb,
+ addr, cookie);
+ if (pdst) {
+ dev_hold(pdst->dev);
+ netdev = pdst->dev;
+ goto out;
+ }
+ }
+ }
+
+ /* Either there is no hook, or can't
+ * determine the port to use - fall back to using FDB
+ */
+
+ br = netdev_priv(dev);
+
+ /* Lookup the fdb entry and get reference to the port dev */
+ fdbe = br_fdb_find_rcu(br, addr, 0);
+ if (fdbe && fdbe->dst) {
+ netdev = fdbe->dst->dev; /* port device */
+ dev_hold(netdev);
+ }
+out:
+ rcu_read_unlock();
+ return netdev;
+}
+EXPORT_SYMBOL_GPL(br_port_dev_get);
+/* QCA NSS ECM support - End */
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -2100,4 +2100,9 @@ void br_do_proxy_suppress_arp(struct sk_
void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
+
+/* QCA NSS ECM support - Start */
+#define __br_get(__hook, __default, __args ...) \
+ (__hook ? (__hook(__args)) : (__default))
+/* QCA NSS ECM support - End */
#endif
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1690,7 +1690,7 @@ const char *netdev_cmd_to_name(enum netd
N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
- N(PRE_CHANGEADDR)
+ N(PRE_CHANGEADDR) N(BR_JOIN) N(BR_LEAVE)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1211,7 +1211,21 @@ static void neigh_update_hhs(struct neig
}
}
+/* QCA NSS ECM support - start */
+ATOMIC_NOTIFIER_HEAD(neigh_mac_update_notifier_list);
+void neigh_mac_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_register_notify);
+
+void neigh_mac_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_unregister_notify);
+/* QCA NSS ECM support - End */
/* Generic update routine.
-- lladdr is new lladdr or NULL, if it is not supplied.
@@ -1242,6 +1256,7 @@ static int __neigh_update(struct neighbo
int notify = 0;
struct net_device *dev;
int update_isrouter = 0;
+ struct neigh_mac_update nmu; /* QCA NSS ECM support */
trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
@@ -1256,6 +1271,8 @@ static int __neigh_update(struct neighbo
new = old;
goto out;
}
+ memset(&nmu, 0, sizeof(struct neigh_mac_update)); /* QCA NSS ECM support */
+
if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
@@ -1293,6 +1310,11 @@ static int __neigh_update(struct neighbo
- compare new & old
- if they are different, check override flag
*/
+ /* QCA NSS ECM support - Start */
+ memcpy(nmu.old_mac, neigh->ha, dev->addr_len);
+ memcpy(nmu.update_mac, lladdr, dev->addr_len);
+ /* QCA NSS ECM support - End */
+
if ((old & NUD_VALID) &&
!memcmp(lladdr, neigh->ha, dev->addr_len))
lladdr = neigh->ha;
@@ -1415,8 +1437,11 @@ out:
if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
neigh_update_gc_list(neigh);
- if (notify)
+ if (notify) {
neigh_update_notify(neigh, nlmsg_pid);
+ atomic_notifier_call_chain(&neigh_mac_update_notifier_list, 0,
+ (struct neigh_mac_update *)&nmu); /* QCA NSS ECM support */
+ }
trace_neigh_update_done(neigh, err);
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1206,6 +1206,9 @@ static bool fib_valid_key_len(u32 key, u
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *old);
+/* Define route change notification chain. */
+static BLOCKING_NOTIFIER_HEAD(iproute_chain); /* QCA NSS ECM support */
+
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
@@ -1398,6 +1401,9 @@ int fib_table_insert(struct net *net, st
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_NEWROUTE, fi);
+
return 0;
out_remove_new_fa:
@@ -1769,6 +1775,9 @@ int fib_table_delete(struct net *net, st
if (fa_to_delete->fa_state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_DELROUTE, fa_to_delete->fa_info);
+
fib_release_info(fa_to_delete->fa_info);
alias_free_mem_rcu(fa_to_delete);
return 0;
@@ -2401,6 +2410,20 @@ void __init fib_trie_init(void)
0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
}
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_register_notifier);
+
+int ip_rt_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_unregister_notifier);
+/* QCA NSS ECM support - End */
+
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
{
struct fib_table *tb;
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1003,6 +1003,7 @@ void inet6_ifa_finish_destroy(struct ine
kfree_rcu(ifp, rcu);
}
+EXPORT_SYMBOL(inet6_ifa_finish_destroy);
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
@@ -2064,6 +2065,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(str
return result;
}
+EXPORT_SYMBOL(ipv6_get_ifaddr);
/* Gets referenced address, destroys ifaddr */
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -650,6 +650,7 @@ void ndisc_send_ns(struct net_device *de
ndisc_send_skb(skb, daddr, saddr);
}
+EXPORT_SYMBOL(ndisc_send_ns);
void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
const struct in6_addr *daddr)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3875,6 +3875,9 @@ out_free:
return ERR_PTR(err);
}
+/* Define route change notification chain. */
+ATOMIC_NOTIFIER_HEAD(ip6route_chain); /* QCA NSS ECM support */
+
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack)
{
@@ -3886,6 +3889,10 @@ int ip6_route_add(struct fib6_config *cf
return PTR_ERR(rt);
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_NEWROUTE, rt);
+
fib6_info_release(rt);
return err;
@@ -3907,6 +3914,9 @@ static int __ip6_del_rt(struct fib6_info
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_DELROUTE, rt);
out:
fib6_info_release(rt);
return err;
@@ -6350,6 +6360,20 @@ static int ip6_route_dev_notify(struct n
return NOTIFY_OK;
}
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_register_notifier);
+
+int rt6_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_unregister_notifier);
+/* QCA NSS ECM support - End */
+
/*
* /proc
*/
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1940,6 +1940,7 @@ static void ip6gre_tap_setup(struct net_
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags_ext |= IFF_EXT_GRE_V6_TAP; /* QCA NSS ECM Support */
netif_keep_dst(dev);
}
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1336,6 +1336,7 @@ static void ipgre_tap_setup(struct net_d
dev->netdev_ops = &gre_tap_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags_ext |= IFF_EXT_GRE_V4_TAP; /* QCA NSS ECM Support */
ip_tunnel_setup(dev, gre_tap_net_id);
}
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -31,4 +31,36 @@ void nf_ct_gre_keymap_destroy(struct nf_
bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple);
+
+/* QCA NSS ECM Support - Start */
+/* GRE is a mess: Four different standards */
+struct gre_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 rec:3,
+ srr:1,
+ seq:1,
+ key:1,
+ routing:1,
+ csum:1,
+ version:3,
+ reserved:4,
+ ack:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 csum:1,
+ routing:1,
+ key:1,
+ seq:1,
+ srr:1,
+ rec:3,
+ ack:1,
+ reserved:4,
+ version:3;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+ __be16 protocol;
+};
+/* QCA NSS ECM Support - End */
+
+
#endif /* _CONNTRACK_PROTO_GRE_H */
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -123,5 +123,6 @@ static inline void nf_ct_destroy_timeout
extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
#endif
+extern unsigned int *udp_get_timeouts(struct net *net);
#endif /* _NF_CONNTRACK_TIMEOUT_H */
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -29,10 +29,11 @@ static const unsigned int udp_timeouts[U
[UDP_CT_REPLIED] = 120*HZ,
};
-static unsigned int *udp_get_timeouts(struct net *net)
+unsigned int *udp_get_timeouts(struct net *net)
{
return nf_udp_pernet(net)->timeouts;
}
+EXPORT_SYMBOL(udp_get_timeouts);
static void udp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,

View File

@@ -0,0 +1,207 @@
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -113,6 +113,9 @@ struct netns_ct {
struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct atomic_notifier_head nf_conntrack_chain;
+#endif
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -78,6 +78,11 @@ struct nf_exp_event {
int report;
};
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
+#endif
+
struct nf_ct_event_notifier {
int (*ct_event)(unsigned int events, const struct nf_ct_event *item);
int (*exp_event)(unsigned int events, const struct nf_exp_event *item);
@@ -111,11 +116,13 @@ static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return;
+#endif
e = nf_ct_ecache_find(ct);
if (e == NULL)
@@ -130,10 +137,12 @@ nf_conntrack_event_report(enum ip_conntr
u32 portid, int report)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
+#endif
return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
#else
@@ -145,10 +154,12 @@ static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
+#endif
return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
#else
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -160,6 +160,21 @@ config NF_CONNTRACK_TIMEOUT
If unsure, say `N'.
+config NF_CONNTRACK_DSCPREMARK_EXT
+ bool 'Connection tracking extension for dscp remark target'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking extension
+ for dscp remark.
+
+config NF_CONNTRACK_CHAIN_EVENTS
+ bool "Register multiple callbacks to ct events"
+ depends on NF_CONNTRACK_EVENTS
+ help
+ Support multiple registrations.
+
+ If unsure, say `N'.
+
config NF_CONNTRACK_TIMESTAMP
bool 'Connection tracking timestamping'
depends on NETFILTER_ADVANCED
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2888,6 +2888,9 @@ int nf_conntrack_init_net(struct net *ne
nf_conntrack_ecache_pernet_init(net);
nf_conntrack_helper_pernet_init(net);
nf_conntrack_proto_pernet_init(net);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ATOMIC_INIT_NOTIFIER_HEAD(&net->ct.nf_conntrack_chain);
+#endif
return 0;
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -17,6 +17,9 @@
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+#include <linux/notifier.h>
+#endif
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
@@ -150,8 +153,15 @@ static int __nf_conntrack_eventmask_repo
rcu_read_unlock();
return 0;
}
-
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ events | missed, &item);
+
+ if (notify)
+ ret = notify->ct_event(events | missed, item);
+#else
ret = notify->ct_event(events | missed, item);
+#endif
rcu_read_unlock();
if (likely(ret >= 0 && missed == 0))
@@ -245,7 +255,11 @@ void nf_ct_expect_event_report(enum ip_c
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
if (!notify)
+#endif
goto out_unlock;
e = nf_ct_ecache_find(exp->master);
@@ -264,6 +278,14 @@ out_unlock:
rcu_read_unlock();
}
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
+#endif
+
void nf_conntrack_register_notifier(struct net *net,
const struct nf_ct_event_notifier *new)
{
@@ -278,6 +300,14 @@ void nf_conntrack_register_notifier(stru
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
+#endif
+
void nf_conntrack_unregister_notifier(struct net *net)
{
mutex_lock(&nf_ct_ecache_mutex);
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -28,6 +28,10 @@ enum nf_ct_ext_id {
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
NF_CT_EXT_SYNPROXY,
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ NF_CT_EXT_DSCPREMARK, /* QCA NSS ECM support */
+#endif
+
NF_CT_EXT_NUM,
};
@@ -40,6 +44,9 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
+/* QCA NSS ECM support - Start */
+#define NF_CT_EXT_DSCPREMARK_TYPE struct nf_ct_dscpremark_ext
+/* QCA NSS ECM support - End */
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -14,6 +14,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_LABEL
nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_DSCPREMARK_EXT) += nf_conntrack_dscpremark_ext.o
obj-$(CONFIG_NETFILTER) = netfilter.o

View File

@@ -0,0 +1,577 @@
From ac4b71aecf237fd07a29788706d198b4e36fa660 Mon Sep 17 00:00:00 2001
From: Simon Casey <simon501098c@gmail.com>
Date: Wed, 2 Feb 2022 19:32:54 +0100
Subject: [PATCH] Update 602-qca-add-pppoe-offload-support.patch for kernel
5.15
---
drivers/net/ppp/ppp_generic.c | 276 +++++++++++++++++++++++++++++++++-
drivers/net/ppp/pppoe.c | 82 +++++++++-
include/linux/if_pppox.h | 13 ++
include/linux/netdevice.h | 19 +++
include/linux/ppp_channel.h | 63 +++++++-
5 files changed, 445 insertions(+), 8 deletions(-)
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -48,6 +48,7 @@
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/if_pppox.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
@@ -253,6 +254,25 @@ struct ppp_net {
#define seq_before(a, b) ((s32)((a) - (b)) < 0)
#define seq_after(a, b) ((s32)((a) - (b)) > 0)
+
+/*
+ * Registration/Unregistration methods
+ * for PPP channel connect and disconnect event notifications.
+ */
+RAW_NOTIFIER_HEAD(ppp_channel_connection_notifier_list);
+
+void ppp_channel_connection_register_notify(struct notifier_block *nb)
+{
+ raw_notifier_chain_register(&ppp_channel_connection_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(ppp_channel_connection_register_notify);
+
+void ppp_channel_connection_unregister_notify(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&ppp_channel_connection_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(ppp_channel_connection_unregister_notify);
+
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
@@ -3452,7 +3472,10 @@ ppp_connect_channel(struct channel *pch,
struct ppp_net *pn;
int ret = -ENXIO;
int hdrlen;
+ int ppp_proto;
+ int version;
+ int notify = 0;
pn = ppp_pernet(pch->chan_net);
mutex_lock(&pn->all_ppp_mutex);
@@ -3484,13 +3507,40 @@ ppp_connect_channel(struct channel *pch,
++ppp->n_channels;
pch->ppp = ppp;
refcount_inc(&ppp->file.refcnt);
+
+ /* Set the netdev priv flag if the prototype
+ * is L2TP or PPTP. Return success in all cases
+ */
+ if (!pch->chan)
+ goto out2;
+
+ ppp_proto = ppp_channel_get_protocol(pch->chan);
+ if (ppp_proto == PX_PROTO_PPTP) {
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_PPTP;
+ } else if (ppp_proto == PX_PROTO_OL2TP) {
+ version = ppp_channel_get_proto_version(pch->chan);
+ if (version == 2)
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV2;
+ else if (version == 3)
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV3;
+ }
+ notify = 1;
+
+ out2:
ppp_unlock(ppp);
ret = 0;
-
outl:
write_unlock_bh(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
+
+ if (notify && ppp && ppp->dev) {
+ dev_hold(ppp->dev);
+ raw_notifier_call_chain(&ppp_channel_connection_notifier_list,
+ PPP_CHANNEL_CONNECT, ppp->dev);
+ dev_put(ppp->dev);
+ }
+
return ret;
}
@@ -3508,6 +3558,13 @@ ppp_disconnect_channel(struct channel *p
pch->ppp = NULL;
write_unlock_bh(&pch->upl);
if (ppp) {
+ if (ppp->dev) {
+ dev_hold(ppp->dev);
+ raw_notifier_call_chain(&ppp_channel_connection_notifier_list,
+ PPP_CHANNEL_DISCONNECT, ppp->dev);
+ dev_put(ppp->dev);
+ }
+
/* remove it from the ppp unit's list */
ppp_lock(ppp);
list_del(&pch->clist);
@@ -3587,6 +3644,222 @@ static void *unit_find(struct idr *p, in
return idr_find(p, n);
}
+/* Updates the PPP interface statistics. */
+void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped)
+{
+ struct ppp *ppp;
+
+ if (!dev)
+ return;
+
+ if (dev->type != ARPHRD_PPP)
+ return;
+
+ ppp = netdev_priv(dev);
+
+ ppp_xmit_lock(ppp);
+ ppp->stats64.tx_packets += tx_packets;
+ ppp->stats64.tx_bytes += tx_bytes;
+ ppp->dev->stats.tx_errors += tx_errors;
+ ppp->dev->stats.tx_dropped += tx_dropped;
+ if (tx_packets)
+ ppp->last_xmit = jiffies;
+ ppp_xmit_unlock(ppp);
+
+ ppp_recv_lock(ppp);
+ ppp->stats64.rx_packets += rx_packets;
+ ppp->stats64.rx_bytes += rx_bytes;
+ ppp->dev->stats.rx_errors += rx_errors;
+ ppp->dev->stats.rx_dropped += rx_dropped;
+ if (rx_packets)
+ ppp->last_recv = jiffies;
+ ppp_recv_unlock(ppp);
+}
+
+/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
+ * the device is not PPP.
+ */
+int ppp_is_multilink(struct net_device *dev)
+{
+ struct ppp *ppp;
+ unsigned int flags;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+ ppp_lock(ppp);
+ flags = ppp->flags;
+ ppp_unlock(ppp);
+
+ if (flags & SC_MULTILINK)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(ppp_is_multilink);
+
+/* ppp_channel_get_protocol()
+ * Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ *
+ * NOTE: Some channels do not use PX sockets so the protocol value may be very
+ * different for them.
+ * NOTE: -1 indicates failure.
+ * NOTE: Once you know the channel protocol you may then either cast 'chan' to
+ * its sub-class or use the channel protocol specific APIs as provided by that
+ * channel sub type.
+ */
+int ppp_channel_get_protocol(struct ppp_channel *chan)
+{
+ if (!chan->ops->get_channel_protocol)
+ return -1;
+
+ return chan->ops->get_channel_protocol(chan);
+}
+EXPORT_SYMBOL(ppp_channel_get_protocol);
+
+/* ppp_channel_get_proto_version()
+ * Call this to get channel protocol version
+ */
+int ppp_channel_get_proto_version(struct ppp_channel *chan)
+{
+ if (!chan->ops->get_channel_protocol_ver)
+ return -1;
+
+ return chan->ops->get_channel_protocol_ver(chan);
+}
+EXPORT_SYMBOL(ppp_channel_get_proto_version);
+
+/* ppp_channel_hold()
+ * Call this to hold a channel.
+ *
+ * Returns true on success or false if the hold could not happen.
+ *
+ * NOTE: chan must be protected against destruction during this call -
+ * either by correct locking etc. or because you already have an implicit
+ * or explicit hold to the channel already and this is an additional hold.
+ */
+bool ppp_channel_hold(struct ppp_channel *chan)
+{
+ if (!chan->ops->hold)
+ return false;
+
+ chan->ops->hold(chan);
+ return true;
+}
+EXPORT_SYMBOL(ppp_channel_hold);
+
+/* ppp_channel_release()
+ * Call this to release a hold you have upon a channel
+ */
+void ppp_channel_release(struct ppp_channel *chan)
+{
+ chan->ops->release(chan);
+}
+EXPORT_SYMBOL(ppp_channel_release);
+
+/* Check if ppp xmit lock is on hold */
+bool ppp_is_xmit_locked(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ if (!dev)
+ return false;
+
+ if (dev->type != ARPHRD_PPP)
+ return false;
+
+ ppp = netdev_priv(dev);
+ if (!ppp)
+ return false;
+
+ if (spin_is_locked(&(ppp)->wlock))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ppp_is_xmit_locked);
+
+/* ppp_hold_channels()
+ * Returns the PPP channels of the PPP device, storing each one into
+ * channels[].
+ *
+ * channels[] has chan_sz elements.
+ * This function returns the number of channels stored, up to chan_sz.
+ * It will return < 0 if the device is not PPP.
+ *
+ * You MUST release the channels using ppp_release_channels().
+ */
+int ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[],
+ unsigned int chan_sz)
+{
+ struct ppp *ppp;
+ int c;
+ struct channel *pch;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+
+ c = 0;
+ ppp_lock(ppp);
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ struct ppp_channel *chan;
+
+ if (!pch->chan) {
+ /* Channel is going / gone away */
+ continue;
+ }
+
+ if (c == chan_sz) {
+ /* No space to record channel */
+ ppp_unlock(ppp);
+ return c;
+ }
+
+ /* Hold the channel, if supported */
+ chan = pch->chan;
+ if (!chan->ops->hold)
+ continue;
+
+ chan->ops->hold(chan);
+
+ /* Record the channel */
+ channels[c++] = chan;
+ }
+ ppp_unlock(ppp);
+ return c;
+}
+EXPORT_SYMBOL(ppp_hold_channels);
+
+/* ppp_release_channels()
+ * Releases channels
+ */
+void ppp_release_channels(struct ppp_channel *channels[], unsigned int chan_sz)
+{
+ unsigned int c;
+
+ for (c = 0; c < chan_sz; ++c) {
+ struct ppp_channel *chan;
+
+ chan = channels[c];
+ chan->ops->release(chan);
+ }
+}
+EXPORT_SYMBOL(ppp_release_channels);
+
/* Module/initialization stuff */
module_init(ppp_init);
@@ -3603,6 +3876,7 @@ EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+EXPORT_SYMBOL(ppp_update_stats);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -62,6 +62,7 @@
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
@@ -87,7 +88,7 @@
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
static const struct proto_ops pppoe_ops;
-static const struct ppp_channel_ops pppoe_chan_ops;
+static const struct pppoe_channel_ops pppoe_chan_ops;
/* per-net private data for this module */
static unsigned int pppoe_net_id __read_mostly;
@@ -692,7 +693,7 @@ static int pppoe_connect(struct socket *
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
- po->chan.ops = &pppoe_chan_ops;
+ po->chan.ops = (struct ppp_channel_ops *)&pppoe_chan_ops;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error) {
@@ -995,9 +996,80 @@ static int pppoe_fill_forward_path(struc
return 0;
}
-static const struct ppp_channel_ops pppoe_chan_ops = {
- .start_xmit = pppoe_xmit,
- .fill_forward_path = pppoe_fill_forward_path,
+/************************************************************************
+ *
+ * function called by generic PPP driver to hold channel
+ *
+ ***********************************************************************/
+static void pppoe_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/************************************************************************
+ *
+ * function called by generic PPP driver to release channel
+ *
+ ***********************************************************************/
+static void pppoe_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/************************************************************************
+ *
+ * function called to get the channel protocol type
+ *
+ ***********************************************************************/
+static int pppoe_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_OE;
+}
+
+/************************************************************************
+ *
+ * function called to get the PPPoE channel addressing
+ * NOTE: This function returns a HOLD to the netdevice
+ *
+ ***********************************************************************/
+static int pppoe_get_addressing(struct ppp_channel *chan,
+ struct pppoe_opt *addressing)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ int err = 0;
+
+ *addressing = po->proto.pppoe;
+ if (!addressing->dev)
+ return -ENODEV;
+
+ dev_hold(addressing->dev);
+ return err;
+}
+
+/* pppoe_channel_addressing_get()
+ * Return PPPoE channel specific addressing information.
+ */
+int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing)
+{
+ return pppoe_get_addressing(chan, addressing);
+}
+EXPORT_SYMBOL(pppoe_channel_addressing_get);
+
+static const struct pppoe_channel_ops pppoe_chan_ops = {
+ /* PPPoE specific channel ops */
+ .get_addressing = pppoe_get_addressing,
+ /* General ppp channel ops */
+ .ops.start_xmit = pppoe_xmit,
+ .ops.get_channel_protocol = pppoe_get_channel_protocol,
+ .ops.hold = pppoe_hold_chan,
+ .ops.release = pppoe_release_chan,
+ .ops.fill_forward_path = pppoe_fill_forward_path,
};
static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -93,4 +93,17 @@ enum {
PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
};
+/*
+ * PPPoE Channel specific operations
+ */
+struct pppoe_channel_ops {
+ /* Must be first - general to all PPP channels */
+ struct ppp_channel_ops ops;
+ int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
+};
+
+/* Return PPPoE channel specific addressing information */
+extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing);
+
#endif /* !(__LINUX_IF_PPPOX_H) */
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2039,6 +2039,7 @@ struct net_device {
unsigned int flags;
unsigned int priv_flags;
unsigned int extra_priv_flags;
+ unsigned int priv_flags_ext;
const struct net_device_ops *netdev_ops;
int ifindex;
unsigned short gflags;
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -19,6 +19,10 @@
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <net/net_namespace.h>
+#include <linux/notifier.h>
+
+#define PPP_CHANNEL_DISCONNECT 0
+#define PPP_CHANNEL_CONNECT 1
struct ppp_channel;
@@ -28,9 +32,19 @@ struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
/* Handle an ioctl call that has come in via /dev/ppp. */
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+ /* Get channel protocol type, one of PX_PROTO_XYZ or specific to
+ * the channel subtype
+ */
+ int (*get_channel_protocol)(struct ppp_channel *);
+ /* Get channel protocol version */
+ int (*get_channel_protocol_ver)(struct ppp_channel *);
+ /* Hold the channel from being destroyed */
+ void (*hold)(struct ppp_channel *);
+ /* Release hold on the channel */
+ void (*release)(struct ppp_channel *);
int (*fill_forward_path)(struct net_device_path_ctx *,
- struct net_device_path *,
- const struct ppp_channel *);
+ struct net_device_path *,
+ const struct ppp_channel *);
};
struct ppp_channel {
@@ -74,6 +88,51 @@ extern int ppp_unit_number(struct ppp_ch
/* Get the device name associated with a channel, or NULL if none */
extern char *ppp_dev_name(struct ppp_channel *);
+/* Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ */
+extern int ppp_channel_get_protocol(struct ppp_channel *);
+
+/* Call this to get protocol version */
+extern int ppp_channel_get_proto_version(struct ppp_channel *);
+
+/* Call this to hold a channel */
+extern bool ppp_channel_hold(struct ppp_channel *);
+
+/* Call this to release a hold you have upon a channel */
+extern void ppp_channel_release(struct ppp_channel *);
+
+/* Release hold on PPP channels */
+extern void ppp_release_channels(struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Hold PPP channels for the PPP device */
+extern int ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if ppp xmit lock is locked */
+extern bool ppp_is_xmit_locked(struct net_device *dev);
+
+/* Test if the ppp device is a multi-link ppp device */
+extern int ppp_is_multilink(struct net_device *dev);
+
+/* Register the PPP channel connect notifier */
+extern void ppp_channel_connection_register_notify(struct notifier_block *nb);
+
+/* Unregister the PPP channel connect notifier */
+extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb);
+
+/* Update statistics of the PPP net_device by incrementing related
+ * statistics field value with corresponding parameter
+ */
+extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped);
+
+
/*
* SMP locking notes:
* The channel code must ensure that when it calls ppp_unregister_channel,

View File

@@ -0,0 +1,95 @@
From 3c17a0e1112be70071e98d5208da5b55dcec20a6 Mon Sep 17 00:00:00 2001
From: Simon Casey <simon501098c@gmail.com>
Date: Wed, 2 Feb 2022 19:37:29 +0100
Subject: [PATCH] Update 605-qca-add-add-nss-bridge-mgr-support.patch for
kernel 5.15
---
include/linux/if_bridge.h | 4 ++++
net/bridge/br_fdb.c | 25 +++++++++++++++++++++----
2 files changed, 25 insertions(+), 4 deletions(-)
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -230,4 +230,8 @@ typedef struct net_bridge_port *br_get_d
extern br_get_dst_hook_t __rcu *br_get_dst_hook;
/* QCA NSS ECM support - End */
+/* QCA NSS bridge-mgr support - Start */
+extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br);
+/* QCA NSS bridge-mgr support - End */
+
#endif
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -66,6 +66,15 @@ void br_fdb_update_unregister_notify(str
EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify);
/* QCA NSS ECM support - End */
+/* QCA NSS bridge-mgr support - Start */
+struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br)
+{
+ dev_hold(br->dev);
+ return br->dev;
+}
+EXPORT_SYMBOL_GPL(br_fdb_bridge_dev_get_and_hold);
+/* QCA NSS bridge-mgr support - End */
+
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -371,7 +380,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
- u8 mac_addr[6]; /* QCA NSS ECM support */
+ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -399,12 +408,13 @@ void br_fdb_cleanup(struct work_struct *
} else {
spin_lock_bh(&br->hash_lock);
if (!hlist_unhashed(&f->fdb_node)) {
- ether_addr_copy(mac_addr, f->key.addr.addr);
+ memset(&fdb_event, 0, sizeof(fdb_event));
+ ether_addr_copy(fdb_event.addr, f->key.addr.addr);
fdb_delete(br, f, true);
/* QCA NSS ECM support - Start */
atomic_notifier_call_chain(
&br_fdb_update_notifier_list, 0,
- (void *)mac_addr);
+ (void *)&fdb_event);
/* QCA NSS ECM support - End */
}
spin_unlock_bh(&br->hash_lock);
@@ -620,6 +630,7 @@ void br_fdb_update(struct net_bridge *br
const unsigned char *addr, u16 vid, unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
+ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */
/* some users want to always flood. */
if (hold_time(br) == 0)
@@ -645,6 +656,12 @@ void br_fdb_update(struct net_bridge *br
if (unlikely(source != READ_ONCE(fdb->dst) &&
!test_bit(BR_FDB_STICKY, &fdb->flags))) {
br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
+ /* QCA NSS bridge-mgr support - Start */
+ ether_addr_copy(fdb_event.addr, addr);
+ fdb_event.br = br;
+ fdb_event.orig_dev = READ_ONCE(fdb->dst->dev);
+ fdb_event.dev = source->dev;
+ /* QCA NSS bridge-mgr support - End */
WRITE_ONCE(fdb->dst, source);
fdb_modified = true;
/* Take over HW learned entry */
@@ -656,7 +673,7 @@ void br_fdb_update(struct net_bridge *br
/* QCA NSS ECM support - Start */
atomic_notifier_call_chain(
&br_fdb_update_notifier_list,
- 0, (void *)addr);
+ 0, (void *)&fdb_event);
/* QCA NSS ECM support - End */
}

View File

@@ -0,0 +1,81 @@
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -249,6 +249,9 @@ static const struct flow_dissector_key f
},
};
+/* QCA NSS bonding support */
+static unsigned long bond_id_mask = 0xFFFFFFF0;
+
static struct flow_dissector flow_keys_bonding __read_mostly;
/*-------------------------- Forward declarations ---------------------------*/
@@ -4115,6 +4118,23 @@ static int bond_get_lowest_level_rcu(str
}
#endif
+/* QCA NSS bonding support */
+int bond_get_id(struct net_device *bond_dev)
+{
+ struct bonding *bond;
+ int bond_id = 0;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return -EINVAL;
+
+ bond = netdev_priv(bond_dev);
+ bond_id = bond->id;
+
+ return bond_id;
+}
+EXPORT_SYMBOL(bond_get_id);
+
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -5447,6 +5467,10 @@ static void bond_destructor(struct net_d
if (bond->rr_tx_counter)
free_percpu(bond->rr_tx_counter);
+
+ /* QCA NSS bonding support */
+ if (bond->id != (~0U))
+ clear_bit(bond->id, &bond_id_mask);
}
void bond_setup(struct net_device *bond_dev)
@@ -6020,7 +6044,14 @@ int bond_create(struct net *net, const c
bond_work_init_all(bond);
- rtnl_unlock();
+ /* QCA NSS bonding support */
+ bond->id = ~0U;
+ if (bond_id_mask != (~0UL)) {
+ bond->id = (u32)ffz(bond_id_mask);
+ set_bit(bond->id, &bond_id_mask);
+ }
+
+ rtnl_unlock();
return 0;
}
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -257,6 +257,7 @@ struct bonding {
spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
struct bpf_prog *xdp_prog;
+ u32 id; /* QCA NSS bonding */
};
#define bond_slave_get_rcu(dev) \
@@ -631,6 +632,7 @@ struct bond_net {
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+int bond_get_id(struct net_device *bond_dev); /* QCA NSS bonding support */
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);

View File

@@ -0,0 +1,96 @@
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -15,6 +15,13 @@ struct macvlan_port;
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
+/* QCA NSS ECM Support - Start */
+/*
+ * Callback for updating interface statistics for macvlan flows offloaded from host CPU.
+ */
+typedef void (*macvlan_offload_stats_update_cb_t)(struct net_device *dev, struct rtnl_link_stats64 *stats, bool update_mcast_rx_stats);
+/* QCA NSS ECM Support - End */
+
struct macvlan_dev {
struct net_device *dev;
struct list_head list;
@@ -34,6 +41,7 @@ struct macvlan_dev {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
+ macvlan_offload_stats_update_cb_t offload_stats_update; /* QCA NSS ECM support */
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
@@ -106,4 +114,26 @@ static inline int macvlan_release_l2fw_o
macvlan->accel_priv = NULL;
return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
}
+
+/* QCA NSS ECM Support - Start */
+#if IS_ENABLED(CONFIG_MACVLAN)
+static inline void
+macvlan_offload_stats_update(struct net_device *dev,
+ struct rtnl_link_stats64 *stats,
+ bool update_mcast_rx_stats)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ macvlan->offload_stats_update(dev, stats, update_mcast_rx_stats);
+}
+
+static inline enum
+macvlan_mode macvlan_get_mode(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode;
+}
+#endif
+/* QCA NSS ECM Support - End */
#endif /* _LINUX_IF_MACVLAN_H */
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -930,6 +930,34 @@ static void macvlan_uninit(struct net_de
macvlan_port_destroy(port->dev);
}
+/* QCA NSS ECM Support - Start */
+/* Update macvlan statistics processed by offload engines */
+static void macvlan_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *offl_stats,
+ bool update_mcast_rx_stats)
+{
+ struct vlan_pcpu_stats *stats;
+ struct macvlan_dev *macvlan;
+
+ /* Is this a macvlan? */
+ if (!netif_is_macvlan(dev))
+ return;
+
+ macvlan = netdev_priv(dev);
+ stats = this_cpu_ptr(macvlan->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += offl_stats->rx_packets;
+ stats->rx_bytes += offl_stats->rx_bytes;
+ stats->tx_packets += offl_stats->tx_packets;
+ stats->tx_bytes += offl_stats->tx_bytes;
+ /* Update multicast statistics */
+ if (unlikely(update_mcast_rx_stats)) {
+ stats->rx_multicast += offl_stats->rx_packets;
+ }
+ u64_stats_update_end(&stats->syncp);
+}
+/* QCA NSS ECM Support - End */
+
static void macvlan_dev_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -1465,6 +1493,7 @@ int macvlan_common_newlink(struct net *s
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
+ vlan->offload_stats_update = macvlan_dev_update_stats; /* QCA NSS ECM Support */
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])

View File

@@ -0,0 +1,44 @@
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -689,6 +689,7 @@ typedef unsigned char *sk_buff_data_t;
* @offload_fwd_mark: Packet was L2-forwarded in hardware
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
+ * @tc_skip_classify_offload: do not classify packet. set by offload IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
@@ -905,6 +906,8 @@ struct sk_buff {
#ifdef CONFIG_NET_CLS_ACT
__u8 tc_skip_classify:1;
__u8 tc_at_ingress:1;
+ __u8 tc_skip_classify_offload:1;
+ __u16 tc_verd_qca_nss; /* QCA NSS Qdisc Support */
#endif
__u8 redirected:1;
#ifdef CONFIG_NET_REDIRECT
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -136,6 +136,7 @@ enum tca_id {
TCA_ID_MPLS,
TCA_ID_CT,
TCA_ID_GATE,
+ TCA_ID_MIRRED_NSS, /* QCA NSS Qdisc IGS Support */
/* other actions go here */
__TCA_ID_MAX = 255
};
@@ -776,4 +777,14 @@ enum {
TCF_EM_OPND_LT
};
+/* QCA NSS Qdisc Support - Start */
+#define _TC_MAKE32(x) ((x))
+#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n))
+
+#define TC_NCLS _TC_MAKEMASK1(8)
+#define TC_NCLS_NSS _TC_MAKEMASK1(12)
+#define SET_TC_NCLS_NSS(v) ( TC_NCLS_NSS | ((v) & ~TC_NCLS_NSS))
+#define CLR_TC_NCLS_NSS(v) ( (v) & ~TC_NCLS_NSS)
+/* QCA NSS Qdisc Support - End */
+
#endif

View File

@@ -0,0 +1,442 @@
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -17,6 +17,7 @@ struct timer_list {
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
+ unsigned long cust_data;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -125,6 +125,31 @@ resched:
}
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats)
+{
+ struct ifb_dev_private *dp;
+ struct ifb_q_private *txp;
+
+ if (!dev || !offload_stats) {
+ return;
+ }
+
+ if (!(dev->priv_flags_ext & IFF_EXT_IFB)) {
+ return;
+ }
+
+ dp = netdev_priv(dev);
+ txp = dp->tx_private;
+
+ u64_stats_update_begin(&txp->rsync);
+ txp->rx_packets += offload_stats->rx_packets;
+ txp->rx_bytes += offload_stats->rx_bytes;
+ txp->tx_packets += offload_stats->tx_packets;
+ txp->tx_bytes += offload_stats->tx_bytes;
+ u64_stats_update_end(&txp->rsync);
+}
+EXPORT_SYMBOL(ifb_update_offload_stats);
+
static void ifb_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -224,6 +249,7 @@ static void ifb_setup(struct net_device
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags_ext |= IFF_EXT_IFB; /* Mark the device as an IFB device. */
netif_keep_dst(dev);
eth_hw_addr_random(dev);
dev->needs_free_netdev = true;
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4758,6 +4758,15 @@ void dev_uc_flush(struct net_device *dev
void dev_uc_init(struct net_device *dev);
/**
+ * ifb_update_offload_stats - Update the IFB interface stats
+ * @dev: IFB device to update the stats
+ * @offload_stats: per CPU stats structure
+ *
+ * Allows update of IFB stats when flows are offloaded to an accelerator.
+ **/
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats);
+
+/**
* __dev_uc_sync - Synchonize device's unicast list
* @dev: device to sync
* @sync: function to call if address should be added
@@ -5316,6 +5325,11 @@ static inline bool netif_is_failover_sla
return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
+static inline bool netif_is_ifb_dev(const struct net_device *dev)
+{
+ return dev->priv_flags_ext & IFF_EXT_IFB;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1265,4 +1265,248 @@ enum {
#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+/* QCA NSS Clients Support - Start */
+enum {
+ TCA_NSS_ACCEL_MODE_NSS_FW,
+ TCA_NSS_ACCEL_MODE_PPE,
+ TCA_NSS_ACCEL_MODE_MAX
+};
+
+/* NSSFIFO section */
+
+enum {
+ TCA_NSSFIFO_UNSPEC,
+ TCA_NSSFIFO_PARMS,
+ __TCA_NSSFIFO_MAX
+};
+
+#define TCA_NSSFIFO_MAX (__TCA_NSSFIFO_MAX - 1)
+
+struct tc_nssfifo_qopt {
+ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRED section */
+
+enum {
+ TCA_NSSWRED_UNSPEC,
+ TCA_NSSWRED_PARMS,
+ __TCA_NSSWRED_MAX
+};
+
+#define TCA_NSSWRED_MAX (__TCA_NSSWRED_MAX - 1)
+#define NSSWRED_CLASS_MAX 6
+struct tc_red_alg_parameter {
+ __u32 min; /* qlen_avg < min: pkts are all enqueued */
+ __u32 max; /* qlen_avg > max: pkts are all dropped */
+ __u32 probability;/* Drop probability at qlen_avg = max */
+ __u32 exp_weight_factor;/* exp_weight_factor for calculate qlen_avg */
+};
+
+struct tc_nsswred_traffic_class {
+ __u32 limit; /* Queue length */
+ __u32 weight_mode_value; /* Weight mode value */
+ struct tc_red_alg_parameter rap;/* Parameters for RED alg */
+};
+
+/*
+ * Weight modes for WRED
+ */
+enum tc_nsswred_weight_modes {
+ TC_NSSWRED_WEIGHT_MODE_DSCP = 0,/* Weight mode is DSCP */
+ TC_NSSWRED_WEIGHT_MODES, /* Must be last */
+};
+
+struct tc_nsswred_qopt {
+ __u32 limit; /* Queue length */
+ enum tc_nsswred_weight_modes weight_mode;
+ /* Weight mode */
+ __u32 traffic_classes; /* How many traffic classes: DPs */
+ __u32 def_traffic_class; /* Default traffic if no match: def_DP */
+ __u32 traffic_id; /* The traffic id to be configured: DP */
+ __u32 weight_mode_value; /* Weight mode value */
+ struct tc_red_alg_parameter rap;/* RED algorithm parameters */
+ struct tc_nsswred_traffic_class tntc[NSSWRED_CLASS_MAX];
+ /* Traffic settings for dumping */
+ __u8 ecn; /* Setting ECN bit or dropping */
+ __u8 set_default; /* Sets qdisc to be the default for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSCODEL section */
+
+enum {
+ TCA_NSSCODEL_UNSPEC,
+ TCA_NSSCODEL_PARMS,
+ __TCA_NSSCODEL_MAX
+};
+
+#define TCA_NSSCODEL_MAX (__TCA_NSSCODEL_MAX - 1)
+
+struct tc_nsscodel_qopt {
+ __u32 target; /* Acceptable queueing delay */
+ __u32 limit; /* Max number of packets that can be held in the queue */
+ __u32 interval; /* Monitoring interval */
+ __u32 flows; /* Number of flow buckets */
+ __u32 quantum; /* Weight (in bytes) used for DRR of flow buckets */
+ __u8 ecn; /* 0 - disable ECN, 1 - enable ECN */
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+struct tc_nsscodel_xstats {
+ __u32 peak_queue_delay; /* Peak delay experienced by a dequeued packet */
+ __u32 peak_drop_delay; /* Peak delay experienced by a dropped packet */
+};
+
+/* NSSFQ_CODEL section */
+
+struct tc_nssfq_codel_xstats {
+ __u32 new_flow_count; /* Total number of new flows seen */
+ __u32 new_flows_len; /* Current number of new flows */
+ __u32 old_flows_len; /* Current number of old flows */
+ __u32 ecn_mark; /* Number of packets marked with ECN */
+ __u32 drop_overlimit; /* Number of packets dropped due to overlimit */
+ __u32 maxpacket; /* The largest packet seen so far in the queue */
+};
+
+/* NSSTBL section */
+
+enum {
+ TCA_NSSTBL_UNSPEC,
+ TCA_NSSTBL_PARMS,
+ __TCA_NSSTBL_MAX
+};
+
+#define TCA_NSSTBL_MAX (__TCA_NSSTBL_MAX - 1)
+
+struct tc_nsstbl_qopt {
+ __u32 burst; /* Maximum burst size */
+ __u32 rate; /* Limiting rate of TBF */
+ __u32 peakrate; /* Maximum rate at which TBF is allowed to send */
+ __u32 mtu; /* Max size of packet, or minimum burst size */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSPRIO section */
+
+#define TCA_NSSPRIO_MAX_BANDS 256
+
+enum {
+ TCA_NSSPRIO_UNSPEC,
+ TCA_NSSPRIO_PARMS,
+ __TCA_NSSPRIO_MAX
+};
+
+#define TCA_NSSPRIO_MAX (__TCA_NSSPRIO_MAX - 1)
+
+struct tc_nssprio_qopt {
+ __u32 bands; /* Number of bands */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBF section */
+
+enum {
+ TCA_NSSBF_UNSPEC,
+ TCA_NSSBF_CLASS_PARMS,
+ TCA_NSSBF_QDISC_PARMS,
+ __TCA_NSSBF_MAX
+};
+
+#define TCA_NSSBF_MAX (__TCA_NSSBF_MAX - 1)
+
+struct tc_nssbf_class_qopt {
+ __u32 burst; /* Maximum burst size */
+ __u32 rate; /* Allowed bandwidth for this class */
+ __u32 mtu; /* MTU of the associated interface */
+ __u32 quantum; /* Quantum allocation for DRR */
+};
+
+struct tc_nssbf_qopt {
+ __u16 defcls; /* Default class value */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRR section */
+
+enum {
+ TCA_NSSWRR_UNSPEC,
+ TCA_NSSWRR_CLASS_PARMS,
+ TCA_NSSWRR_QDISC_PARMS,
+ __TCA_NSSWRR_MAX
+};
+
+#define TCA_NSSWRR_MAX (__TCA_NSSWRR_MAX - 1)
+
+struct tc_nsswrr_class_qopt {
+ __u32 quantum; /* Weight associated to this class */
+};
+
+struct tc_nsswrr_qopt {
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWFQ section */
+
+enum {
+ TCA_NSSWFQ_UNSPEC,
+ TCA_NSSWFQ_CLASS_PARMS,
+ TCA_NSSWFQ_QDISC_PARMS,
+ __TCA_NSSWFQ_MAX
+};
+
+#define TCA_NSSWFQ_MAX (__TCA_NSSWFQ_MAX - 1)
+
+struct tc_nsswfq_class_qopt {
+ __u32 quantum; /* Weight associated to this class */
+};
+
+struct tc_nsswfq_qopt {
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSHTB section */
+
+enum {
+ TCA_NSSHTB_UNSPEC,
+ TCA_NSSHTB_CLASS_PARMS,
+ TCA_NSSHTB_QDISC_PARMS,
+ __TCA_NSSHTB_MAX
+};
+
+#define TCA_NSSHTB_MAX (__TCA_NSSHTB_MAX - 1)
+
+struct tc_nsshtb_class_qopt {
+ __u32 burst; /* Allowed burst size */
+ __u32 rate; /* Allowed bandwidth for this class */
+ __u32 cburst; /* Maximum burst size */
+ __u32 crate; /* Maximum bandwidth for this class */
+ __u32 quantum; /* Quantum allocation for DRR */
+ __u32 priority; /* Priority value associated with this class */
+ __u32 overhead; /* Overhead in bytes per packet */
+};
+
+struct tc_nsshtb_qopt {
+ __u32 r2q; /* Rate to quantum ratio */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBLACKHOLE section */
+
+enum {
+ TCA_NSSBLACKHOLE_UNSPEC,
+ TCA_NSSBLACKHOLE_PARMS,
+ __TCA_NSSBLACKHOLE_MAX
+};
+
+#define TCA_NSSBLACKHOLE_MAX (__TCA_NSSBLACKHOLE_MAX - 1)
+
+struct tc_nssblackhole_qopt {
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+/* QCA NSS Clients Support - End */
#endif
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -2351,4 +2351,26 @@ static int __init pktsched_init(void)
return 0;
}
+/* QCA NSS Qdisc Support - Start */
+bool tcf_destroy(struct tcf_proto *tp, bool force)
+{
+ tp->ops->destroy(tp, force, NULL);
+ module_put(tp->ops->owner);
+ kfree_rcu(tp, rcu);
+
+ return true;
+}
+
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+{
+ struct tcf_proto *tp;
+
+ while ((tp = rtnl_dereference(*fl)) != NULL) {
+ RCU_INIT_POINTER(*fl, tp->next);
+ tcf_destroy(tp, true);
+ }
+}
+EXPORT_SYMBOL(tcf_destroy_chain);
+/* QCA NSS Qdisc Support - End */
+
subsys_initcall(pktsched_init);
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1008,7 +1008,7 @@ static void qdisc_free_cb(struct rcu_hea
qdisc_free(q);
}
-static void qdisc_destroy(struct Qdisc *qdisc)
+void qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
@@ -1031,6 +1031,7 @@ static void qdisc_destroy(struct Qdisc *
call_rcu(&qdisc->rcu, qdisc_free_cb);
}
+EXPORT_SYMBOL(qdisc_destroy);
void qdisc_put(struct Qdisc *qdisc)
{
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -87,6 +87,7 @@ struct Qdisc {
#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
+#define TCQ_F_NSS 0x1000 /* NSS qdisc flag. */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -738,6 +739,40 @@ static inline bool skb_skip_tc_classify(
return false;
}
+/*
+ * Set skb classify bit field.
+ */
+static inline void skb_set_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_skip_classify_offload = 1;
+#endif
+}
+
+/*
+ * Clear skb classify bit field.
+ */
+static inline void skb_clear_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_skip_classify_offload = 0;
+#endif
+}
+
+/*
+ * Skip skb processing if sent from ifb dev.
+ */
+static inline bool skb_skip_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (skb->tc_skip_classify_offload) {
+ skb_clear_tc_classify_offload(skb);
+ return true;
+ }
+#endif
+ return false;
+}
+
/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
@@ -1342,4 +1377,9 @@ static inline void qdisc_synchronize(con
msleep(1);
}
+/* QCA NSS Qdisc Support - Start */
+void qdisc_destroy(struct Qdisc *qdisc);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+/* QCA NSS Qdisc Support - End */
+
#endif

View File

@@ -0,0 +1,46 @@
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -398,6 +398,31 @@ err_tlock:
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session,
+ struct l2tp_stats *stats)
+{
+ atomic_long_add(atomic_long_read(&stats->rx_packets),
+ &tunnel->stats.rx_packets);
+ atomic_long_add(atomic_long_read(&stats->rx_bytes),
+ &tunnel->stats.rx_bytes);
+ atomic_long_add(atomic_long_read(&stats->tx_packets),
+ &tunnel->stats.tx_packets);
+ atomic_long_add(atomic_long_read(&stats->tx_bytes),
+ &tunnel->stats.tx_bytes);
+
+ atomic_long_add(atomic_long_read(&stats->rx_packets),
+ &session->stats.rx_packets);
+ atomic_long_add(atomic_long_read(&stats->rx_bytes),
+ &session->stats.rx_bytes);
+ atomic_long_add(atomic_long_read(&stats->tx_packets),
+ &session->stats.tx_packets);
+ atomic_long_add(atomic_long_read(&stats->tx_bytes),
+ &session->stats.tx_bytes);
+}
+EXPORT_SYMBOL_GPL(l2tp_stats_update);
+
+
/*****************************************************************************
* Receive data handling
*****************************************************************************/
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -232,6 +232,9 @@ struct l2tp_session *l2tp_session_get_nt
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
+ struct l2tp_stats *stats);
+
/* Tunnel and session lifetime management.
* Creation of a new instance is a two-step process: create, then register.
* Destruction is triggered using the *_delete functions, and completes asynchronously.

View File

@@ -0,0 +1,478 @@
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -38,6 +38,7 @@ struct pptp_opt {
u32 ack_sent, ack_recv;
u32 seq_sent, seq_recv;
int ppp_flags;
+ bool pptp_offload_mode;
};
#include <net/sock.h>
@@ -102,8 +103,40 @@ struct pppoe_channel_ops {
int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
};
+/* PPTP client callback */
+typedef int (*pptp_gre_seq_offload_callback_t)(struct sk_buff *skb,
+ struct net_device *pptp_dev);
+
/* Return PPPoE channel specific addressing information */
extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
struct pppoe_opt *addressing);
+/* Lookup PPTP session info and return PPTP session using sip, dip and local call id */
+extern int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
+ __be32 daddr, __be32 saddr);
+
+/* Lookup PPTP session info and return PPTP session using dip and peer call id */
+extern int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Return PPTP session information given the channel */
+extern void pptp_channel_addressing_get(struct pptp_opt *opt,
+ struct ppp_channel *chan);
+
+/* Enable the PPTP session offload flag */
+extern int pptp_session_enable_offload_mode(__be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Disable the PPTP session offload flag */
+extern int pptp_session_disable_offload_mode(__be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Register the PPTP GRE packets sequence number offload callback */
+extern int
+pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
+ pptp_client_cb);
+
+/* Unregister the PPTP GRE packets sequence number offload callback */
+extern void pptp_unregister_gre_seq_offload_callback(void);
+
#endif /* !(__LINUX_IF_PPPOX_H) */
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2972,6 +2972,20 @@ char *ppp_dev_name(struct ppp_channel *c
return name;
}
+/* Return the PPP net device index */
+int ppp_dev_index(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ int ifindex = 0;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ ifindex = pch->ppp->dev->ifindex;
+ read_unlock_bh(&pch->upl);
+ }
+ return ifindex;
+}
/*
* Disconnect a channel from the generic layer.
@@ -3680,6 +3694,28 @@ void ppp_update_stats(struct net_device
ppp_recv_unlock(ppp);
}
+/* Returns true if Compression is enabled on PPP device
+ */
+bool ppp_is_cp_enabled(struct net_device *dev)
+{
+ struct ppp *ppp;
+ bool flag = false;
+
+ if (!dev)
+ return false;
+
+ if (dev->type != ARPHRD_PPP)
+ return false;
+
+ ppp = netdev_priv(dev);
+ ppp_lock(ppp);
+ flag = !!(ppp->xstate & SC_COMP_RUN) || !!(ppp->rstate & SC_DECOMP_RUN);
+ ppp_unlock(ppp);
+
+ return flag;
+}
+EXPORT_SYMBOL(ppp_is_cp_enabled);
+
/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
* the device is not PPP.
*/
@@ -3871,6 +3907,7 @@ EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_dev_name);
+EXPORT_SYMBOL(ppp_dev_index);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -82,6 +82,9 @@ extern void ppp_unregister_channel(struc
/* Get the channel number for a channel */
extern int ppp_channel_index(struct ppp_channel *);
+/* Get the device index associated with a channel, or 0, if none */
+extern int ppp_dev_index(struct ppp_channel *);
+
/* Get the unit number associated with a channel, or -1 if none */
extern int ppp_unit_number(struct ppp_channel *);
@@ -114,6 +117,7 @@ extern int ppp_hold_channels(struct net_
/* Test if ppp xmit lock is locked */
extern bool ppp_is_xmit_locked(struct net_device *dev);
+bool ppp_is_cp_enabled(struct net_device *dev);
/* Test if the ppp device is a multi-link ppp device */
extern int ppp_is_multilink(struct net_device *dev);
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -50,6 +50,8 @@ static struct proto pptp_sk_proto __read
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;
+static pptp_gre_seq_offload_callback_t __rcu pptp_gre_offload_xmit_cb;
+
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
struct pppox_sock *sock;
@@ -91,6 +93,79 @@ static int lookup_chan_dst(u16 call_id,
return i < MAX_CALLID;
}
+/* Search a pptp session based on local call id, local and remote ip address */
+static int lookup_session_src(struct pptp_opt *opt, u16 call_id, __be32 daddr, __be32 saddr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.src_addr.call_id == call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == daddr &&
+ sock->proto.pptp.src_addr.sin_addr.s_addr == saddr) {
+ sock_hold(sk_pppox(sock));
+ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Search a pptp session based on peer call id and peer ip address */
+static int lookup_session_dst(struct pptp_opt *opt, u16 call_id, __be32 d_addr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.dst_addr.call_id == call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == d_addr) {
+ sock_hold(sk_pppox(sock));
+ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* If offload mode set then this function sends all packets to
+ * offload module instead of network stack
+ */
+static int pptp_client_skb_xmit(struct sk_buff *skb,
+ struct net_device *pptp_dev)
+{
+ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
+ int ret;
+
+ rcu_read_lock();
+ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
+
+ if (!pptp_gre_offload_cb_f) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ ret = pptp_gre_offload_cb_f(skb, pptp_dev);
+ rcu_read_unlock();
+ return ret;
+}
+
static int add_chan(struct pppox_sock *sock,
struct pptp_addr *sa)
{
@@ -136,7 +211,7 @@ static struct rtable *pptp_route_output(
struct net *net;
net = sock_net(sk);
- flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 0,
+ flowi4_init_output(fl4, 0, sk->sk_mark, 0,
RT_SCOPE_UNIVERSE, IPPROTO_GRE, 0,
po->proto.pptp.dst_addr.sin_addr.s_addr,
po->proto.pptp.src_addr.sin_addr.s_addr,
@@ -163,8 +238,11 @@ static int pptp_xmit(struct ppp_channel
struct rtable *rt;
struct net_device *tdev;
+ struct net_device *pptp_dev;
struct iphdr *iph;
int max_headroom;
+ int pptp_ifindex;
+ int ret;
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
@@ -258,7 +336,32 @@ static int pptp_xmit(struct ppp_channel
ip_select_ident(net, skb, NULL);
ip_send_check(iph);
- ip_local_out(net, skb->sk, skb);
+ pptp_ifindex = ppp_dev_index(chan);
+
+ /* set incoming interface as the ppp interface */
+ if (skb->skb_iif)
+ skb->skb_iif = pptp_ifindex;
+
+ /* If the PPTP GRE seq number offload module is not enabled yet
+ * then sends all PPTP GRE packets through linux network stack
+ */
+ if (!opt->pptp_offload_mode) {
+ ip_local_out(net, skb->sk, skb);
+ return 1;
+ }
+
+ pptp_dev = dev_get_by_index(&init_net, pptp_ifindex);
+ if (!pptp_dev)
+ goto tx_error;
+
+ /* If PPTP offload module is enabled then forward all PPTP GRE
+ * packets to PPTP GRE offload module
+ */
+ ret = pptp_client_skb_xmit(skb, pptp_dev);
+ dev_put(pptp_dev);
+ if (ret < 0)
+ goto tx_error;
+
return 1;
tx_error:
@@ -314,6 +417,13 @@ static int pptp_rcv_core(struct sock *sk
goto drop;
payload = skb->data + headersize;
+
+ /* If offload is enabled, we expect the offload module
+ * to handle PPTP GRE sequence number checks
+ */
+ if (opt->pptp_offload_mode)
+ goto allow_packet;
+
/* check for expected sequence number */
if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
@@ -371,6 +481,7 @@ static int pptp_rcv(struct sk_buff *skb)
if (po) {
skb_dst_drop(skb);
nf_reset_ct(skb);
+ skb->skb_iif = ppp_dev_index(&po->chan);
return sk_receive_skb(sk_pppox(po), skb, 0);
}
drop:
@@ -473,7 +584,7 @@ static int pptp_connect(struct socket *s
opt->dst_addr = sp->sa_addr.pptp;
sk->sk_state |= PPPOX_CONNECTED;
-
+ opt->pptp_offload_mode = false;
end:
release_sock(sk);
return error;
@@ -603,9 +714,169 @@ static int pptp_ppp_ioctl(struct ppp_cha
return err;
}
+/* pptp_channel_addressing_get()
+ * Return PPTP channel specific addressing information.
+ */
+void pptp_channel_addressing_get(struct pptp_opt *opt, struct ppp_channel *chan)
+{
+ struct sock *sk;
+ struct pppox_sock *po;
+
+ if (!opt)
+ return;
+
+ sk = (struct sock *)chan->private;
+ if (!sk)
+ return;
+
+ sock_hold(sk);
+
+ /* This is very unlikely, but check the socket is connected state */
+ if (unlikely(sock_flag(sk, SOCK_DEAD) ||
+ !(sk->sk_state & PPPOX_CONNECTED))) {
+ sock_put(sk);
+ return;
+ }
+
+ po = pppox_sk(sk);
+ memcpy(opt, &po->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk);
+}
+EXPORT_SYMBOL(pptp_channel_addressing_get);
+
+/* pptp_session_find()
+ * Search and return a PPTP session info based on peer callid and IP
+ * address. The function accepts the parameters in network byte order.
+ */
+int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
+ __be32 peer_ip_addr)
+{
+ if (!opt)
+ return -EINVAL;
+
+ return lookup_session_dst(opt, ntohs(peer_call_id), peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_find);
+
+/* pptp_session_find_by_src_callid()
+ * Search and return a PPTP session info based on src callid and IP
+ * address. The function accepts the parameters in network byte order.
+ */
+int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
+ __be32 daddr, __be32 saddr)
+{
+ if (!opt)
+ return -EINVAL;
+
+ return lookup_session_src(opt, ntohs(src_call_id), daddr, saddr);
+}
+EXPORT_SYMBOL(pptp_session_find_by_src_callid);
+
+ /* Function to change the offload mode true/false for a PPTP session */
+static int pptp_set_offload_mode(bool accel_mode,
+ __be16 peer_call_id, __be32 peer_ip_addr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.dst_addr.call_id == ntohs(peer_call_id) &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == peer_ip_addr) {
+ sock_hold(sk_pppox(sock));
+ sock->proto.pptp.pptp_offload_mode = accel_mode;
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Enable the PPTP session offload flag */
+int pptp_session_enable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr)
+{
+ return pptp_set_offload_mode(true, peer_call_id, peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_enable_offload_mode);
+
+/* Disable the PPTP session offload flag */
+int pptp_session_disable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr)
+{
+ return pptp_set_offload_mode(false, peer_call_id, peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_disable_offload_mode);
+
+/* Register the offload callback function on behalf of the module which
+ * will own the sequence and acknowledgment number updates for all
+ * PPTP GRE packets. All PPTP GRE packets are then transmitted to this
+ * module after encapsulation in order to ensure the correct seq/ack
+ * fields are set in the packets before transmission. This is required
+ * when PPTP flows are offloaded to acceleration engines, in-order to
+ * ensure consistency in sequence and ack numbers between PPTP control
+ * (PPP LCP) and data packets
+ */
+int pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
+ pptp_gre_offload_cb)
+{
+ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
+
+ rcu_read_lock();
+ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
+
+ if (pptp_gre_offload_cb_f) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ rcu_assign_pointer(pptp_gre_offload_xmit_cb, pptp_gre_offload_cb);
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(pptp_register_gre_seq_offload_callback);
+
+/* Unregister the PPTP GRE packets sequence number offload callback */
+void pptp_unregister_gre_seq_offload_callback(void)
+{
+ rcu_assign_pointer(pptp_gre_offload_xmit_cb, NULL);
+}
+EXPORT_SYMBOL(pptp_unregister_gre_seq_offload_callback);
+
+/* pptp_hold_chan() */
+static void pptp_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/* pptp_release_chan() */
+static void pptp_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/* pptp_get_channel_protocol()
+ * Return the protocol type of the PPTP over PPP protocol
+ */
+static int pptp_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_PPTP;
+}
+
static const struct ppp_channel_ops pptp_chan_ops = {
.start_xmit = pptp_xmit,
.ioctl = pptp_ppp_ioctl,
+ .get_channel_protocol = pptp_get_channel_protocol,
+ .hold = pptp_hold_chan,
+ .release = pptp_release_chan,
};
static struct proto pptp_sk_proto __read_mostly = {

View File

@@ -0,0 +1,77 @@
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,7 @@ struct __ip6_tnl_parm {
__u8 proto; /* tunnel protocol */
__u8 encap_limit; /* encapsulation limit for tunnel */
__u8 hop_limit; /* hop limit for tunnel */
+ __u8 draft03; /* FMR using draft03 of map-e - QCA NSS Clients Support */
bool collect_md;
__be32 flowinfo; /* traffic class and flowlabel for tunnel */
__u32 flags; /* tunnel flags */
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -534,4 +534,9 @@ static inline void ip_tunnel_info_opts_s
#endif /* CONFIG_INET */
+/* QCA NSS Clients Support - Start */
+void ipip6_update_offload_stats(struct net_device *dev, void *ptr);
+void ip6_update_offload_stats(struct net_device *dev, void *ptr);
+/* QCA NSS Clients Support - End */
+
#endif /* __NET_IP_TUNNELS_H */
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2440,6 +2440,26 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* QCA NSS Client Support - Start */
+/*
+ * Update offload stats
+ */
+void ip6_update_offload_stats(struct net_device *dev, void *ptr)
+{
+ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_packets += offload_stats->tx_packets;
+ tstats->tx_bytes += offload_stats->tx_bytes;
+ tstats->rx_packets += offload_stats->rx_packets;
+ tstats->rx_bytes += offload_stats->rx_bytes;
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL(ip6_update_offload_stats);
+/* QCA NSS Client Support - End */
+
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1798,6 +1798,23 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* QCA NSS Clients Support - Start */
+void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
+{
+ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_packets += offload_stats->tx_packets;
+ tstats->tx_bytes += offload_stats->tx_bytes;
+ tstats->rx_packets += offload_stats->rx_packets;
+ tstats->rx_bytes += offload_stats->rx_bytes;
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL(ipip6_update_offload_stats);
+/* QCA NSS Clients Support - End */
+
static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
[IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },

View File

@@ -0,0 +1,119 @@
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -90,6 +90,20 @@ struct vxlan_fdb {
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
+ATOMIC_NOTIFIER_HEAD(vxlan_fdb_notifier_list);
+
+void vxlan_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&vxlan_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL(vxlan_fdb_register_notify);
+
+void vxlan_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&vxlan_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL(vxlan_fdb_unregister_notify);
+
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
return vs->flags & VXLAN_F_COLLECT_METADATA ||
@@ -367,6 +381,7 @@ static void __vxlan_fdb_notify(struct vx
{
struct net *net = dev_net(vxlan->dev);
struct sk_buff *skb;
+ struct vxlan_fdb_event vfe;
int err = -ENOBUFS;
skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
@@ -382,6 +397,10 @@ static void __vxlan_fdb_notify(struct vx
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+ vfe.dev = vxlan->dev;
+ vfe.rdst = rd;
+ ether_addr_copy(vfe.eth_addr, fdb->eth_addr);
+ atomic_notifier_call_chain(&vxlan_fdb_notifier_list, type, (void *)&vfe);
return;
errout:
if (err < 0)
@@ -548,6 +567,18 @@ static struct vxlan_fdb *vxlan_find_mac(
return f;
}
+/* Find and update age of fdb entry corresponding to MAC. */
+void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni)
+{
+ u32 hash_index;
+
+ hash_index = fdb_head_index(vxlan, mac, vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ vxlan_find_mac(vxlan, mac, vni);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+}
+EXPORT_SYMBOL(vxlan_fdb_update_mac);
+
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port,
@@ -2746,6 +2777,9 @@ static void vxlan_xmit_one(struct sk_buf
goto out_unlock;
}
+ /* Reset the skb_iif to Tunnels interface index */
+ skb->skb_iif = dev->ifindex;
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2817,6 +2851,9 @@ static void vxlan_xmit_one(struct sk_buf
if (err < 0)
goto tx_error;
+ /* Reset the skb_iif to Tunnels interface index */
+ skb->skb_iif = dev->ifindex;
+
udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
&local_ip.sin6.sin6_addr,
&dst->sin6.sin6_addr, tos, ttl,
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -293,6 +293,19 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_COLLECT_METADATA)
+/*
+ * Application data for fdb notifier event
+ */
+struct vxlan_fdb_event {
+ struct net_device *dev;
+ struct vxlan_rdst *rdst;
+ u8 eth_addr[ETH_ALEN];
+};
+
+extern void vxlan_fdb_register_notify(struct notifier_block *nb);
+extern void vxlan_fdb_unregister_notify(struct notifier_block *nb);
+extern void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni);
+
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
@@ -381,6 +394,15 @@ static inline __be32 vxlan_compute_rco(u
return vni_field;
}
+/*
+ * vxlan_get_vni()
+ * Returns the vni corresponding to tunnel
+ */
+static inline u32 vxlan_get_vni(struct vxlan_dev *vxlan_tun)
+{
+ return be32_to_cpu(vxlan_tun->cfg.vni);
+}
+
static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
{
return vs->sock->sk->sk_family;

View File

@@ -0,0 +1,368 @@
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -59,6 +59,51 @@ struct ppp_channel {
};
#ifdef __KERNEL__
+/* Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ */
+extern int ppp_channel_get_protocol(struct ppp_channel *);
+
+/* Call this to hold a channel */
+extern bool ppp_channel_hold(struct ppp_channel *);
+
+/* Call this to release a hold you have upon a channel */
+extern void ppp_channel_release(struct ppp_channel *);
+
+/* Release hold on PPP channels */
+extern void ppp_release_channels(struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if ppp xmit lock is locked */
+extern bool ppp_is_xmit_locked(struct net_device *dev);
+
+/* Call this get protocol version */
+extern int ppp_channel_get_proto_version(struct ppp_channel *);
+
+/* Get the device index associated with a channel, or 0, if none */
+extern int ppp_dev_index(struct ppp_channel *);
+
+/* Hold PPP channels for the PPP device */
+extern int ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+extern int __ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if the ppp device is a multi-link ppp device */
+extern int ppp_is_multilink(struct net_device *dev);
+extern int __ppp_is_multilink(struct net_device *dev);
+
+/* Update statistics of the PPP net_device by incrementing related
+ * statistics field value with corresponding parameter
+ */
+extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped);
+
/* Called by the channel when it can send some more data. */
extern void ppp_output_wakeup(struct ppp_channel *);
@@ -146,5 +191,17 @@ extern void ppp_update_stats(struct net_
* that ppp_unregister_channel returns.
*/
+/* QCA NSS Clients Support - Start */
+/* PPP channel connection event types */
+#define PPP_CHANNEL_DISCONNECT 0
+#define PPP_CHANNEL_CONNECT 1
+
+/* Register the PPP channel connect notifier */
+extern void ppp_channel_connection_register_notify(struct notifier_block *nb);
+
+/* Unregister the PPP channel connect notifier */
+extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb);
+/* QCA NSS Clients Support - End */
+
#endif /* __KERNEL__ */
#endif
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -14,4 +14,30 @@
#include <linux/in6.h>
#include <uapi/linux/if_pppol2tp.h>
+/* QCA NSS ECM support - Start */
+/*
+ * Holds L2TP channel info
+ */
+struct pppol2tp_common_addr {
+ int tunnel_version; /* v2 or v3 */
+ __u32 local_tunnel_id, remote_tunnel_id; /* tunnel id */
+ __u32 local_session_id, remote_session_id; /* session id */
+ struct sockaddr_in local_addr, remote_addr; /* ip address and port */
+};
+
+/*
+ * L2TP channel operations
+ */
+struct pppol2tp_channel_ops {
+ struct ppp_channel_ops ops; /* ppp channel ops */
+};
+
+/*
+ * exported function which calls pppol2tp channel's get addressing
+ * function
+ */
+extern int pppol2tp_channel_addressing_get(struct ppp_channel *,
+ struct pppol2tp_common_addr *);
+/* QCA NSS ECM support - End */
+
#endif
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -123,9 +123,17 @@ struct pppol2tp_session {
};
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
-
-static const struct ppp_channel_ops pppol2tp_chan_ops = {
- .start_xmit = pppol2tp_xmit,
+static int pppol2tp_get_channel_protocol(struct ppp_channel *);
+static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *);
+static void pppol2tp_hold_chan(struct ppp_channel *);
+static void pppol2tp_release_chan(struct ppp_channel *);
+
+static const struct pppol2tp_channel_ops pppol2tp_chan_ops = {
+ .ops.start_xmit = pppol2tp_xmit,
+ .ops.get_channel_protocol = pppol2tp_get_channel_protocol,
+ .ops.get_channel_protocol_ver = pppol2tp_get_channel_protocol_ver,
+ .ops.hold = pppol2tp_hold_chan,
+ .ops.release = pppol2tp_release_chan,
};
static const struct proto_ops pppol2tp_ops;
@@ -374,6 +382,13 @@ static int pppol2tp_xmit(struct ppp_chan
skb->data[0] = PPP_ALLSTATIONS;
skb->data[1] = PPP_UI;
+ /* QCA NSS ECM support - start */
+ /* set incoming interface as the ppp interface */
+ if ((skb->protocol == htons(ETH_P_IP)) ||
+ (skb->protocol == htons(ETH_P_IPV6)))
+ skb->skb_iif = ppp_dev_index(chan);
+ /* QCA NSS ECM support - End */
+
local_bh_disable();
l2tp_xmit_skb(session, skb);
local_bh_enable();
@@ -819,7 +834,7 @@ static int pppol2tp_connect(struct socke
po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
po->chan.private = sk;
- po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.ops = (struct ppp_channel_ops *)&pppol2tp_chan_ops.ops;
po->chan.mtu = pppol2tp_tunnel_mtu(tunnel);
error = ppp_register_net_channel(sock_net(sk), &po->chan);
@@ -1733,6 +1748,109 @@ static void __exit pppol2tp_exit(void)
unregister_pernet_device(&pppol2tp_net_ops);
}
+/* QCA NSS ECM support - Start */
+/* pppol2tp_hold_chan() */
+static void pppol2tp_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/* pppol2tp_release_chan() */
+static void pppol2tp_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/* pppol2tp_get_channel_protocol()
+ * Return the protocol type of the L2TP over PPP protocol
+ */
+static int pppol2tp_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_OL2TP;
+}
+
+/* pppol2tp_get_channel_protocol_ver()
+ * Return the protocol version of the L2TP over PPP protocol
+ */
+static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *chan)
+{
+ struct sock *sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int version = 0;
+
+ if (chan && chan->private)
+ sk = (struct sock *)chan->private;
+ else
+ return -1;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ return -1;
+
+ tunnel = session->tunnel;
+ if (!tunnel) {
+ sock_put(sk);
+ return -1;
+ }
+
+ version = tunnel->version;
+
+ sock_put(sk);
+
+ return version;
+}
+
+/* pppol2tp_get_addressing() */
+static int pppol2tp_get_addressing(struct ppp_channel *chan,
+ struct pppol2tp_common_addr *addr)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct inet_sock *isk = NULL;
+ int err = -ENXIO;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ return err;
+
+ tunnel = session->tunnel;
+ if (!tunnel) {
+ sock_put(sk);
+ return err;
+ }
+ isk = inet_sk(tunnel->sock);
+
+ addr->local_tunnel_id = tunnel->tunnel_id;
+ addr->remote_tunnel_id = tunnel->peer_tunnel_id;
+ addr->local_session_id = session->session_id;
+ addr->remote_session_id = session->peer_session_id;
+
+ addr->local_addr.sin_port = isk->inet_sport;
+ addr->remote_addr.sin_port = isk->inet_dport;
+ addr->local_addr.sin_addr.s_addr = isk->inet_saddr;
+ addr->remote_addr.sin_addr.s_addr = isk->inet_daddr;
+
+ sock_put(sk);
+ return 0;
+}
+
+/* pppol2tp_channel_addressing_get() */
+int pppol2tp_channel_addressing_get(struct ppp_channel *chan,
+ struct pppol2tp_common_addr *addr)
+{
+ return pppol2tp_get_addressing(chan, addr);
+}
+EXPORT_SYMBOL(pppol2tp_channel_addressing_get);
+/* QCA NSS ECM support - End */
+
module_init(pppol2tp_init);
module_exit(pppol2tp_exit);
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3742,6 +3742,32 @@ int ppp_is_multilink(struct net_device *
}
EXPORT_SYMBOL(ppp_is_multilink);
+/* __ppp_is_multilink()
+ * Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0
+ * if the device is not PPP. Caller should acquire ppp_lock before calling
+ * this function
+ */
+int __ppp_is_multilink(struct net_device *dev)
+{
+ struct ppp *ppp;
+ unsigned int flags;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+ flags = ppp->flags;
+
+ if (flags & SC_MULTILINK)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(__ppp_is_multilink);
+
/* ppp_channel_get_protocol()
* Call this to obtain the underlying protocol of the PPP channel,
* e.g. PX_PROTO_OE
@@ -3880,6 +3906,59 @@ int ppp_hold_channels(struct net_device
}
EXPORT_SYMBOL(ppp_hold_channels);
+/* __ppp_hold_channels()
+ * Returns the PPP channels of the PPP device, storing each one into
+ * channels[].
+ *
+ * channels[] has chan_sz elements.
+ * This function returns the number of channels stored, up to chan_sz.
+ * It will return < 0 if the device is not PPP.
+ *
+ * You MUST release the channels using ppp_release_channels().
+ */
+int __ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[],
+ unsigned int chan_sz)
+{
+ struct ppp *ppp;
+ int c;
+ struct channel *pch;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+
+ c = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ struct ppp_channel *chan;
+
+ if (!pch->chan) {
+ /* Channel is going / gone away */
+ continue;
+ }
+
+ if (c == chan_sz) {
+ /* No space to record channel */
+ return c;
+ }
+
+ /* Hold the channel, if supported */
+ chan = pch->chan;
+ if (!chan->ops->hold)
+ continue;
+
+ chan->ops->hold(chan);
+
+ /* Record the channel */
+ channels[c++] = chan;
+ }
+ return c;
+}
+EXPORT_SYMBOL(__ppp_hold_channels);
+
/* ppp_release_channels()
* Releases channels
*/
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -235,6 +235,9 @@ struct l2tp_session *l2tp_session_get_by
void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
struct l2tp_stats *stats);
+/* l2tp_stats_update() is already declared immediately above; the NSS l2tp
+ * client needs only that single declaration to sync offloaded stats. */
+
/* Tunnel and session lifetime management.
* Creation of a new instance is a two-step process: create, then register.
* Destruction is triggered using the *_delete functions, and completes asynchronously.

View File

@@ -0,0 +1,22 @@
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2446,7 +2446,7 @@ nla_put_failure:
*/
void ip6_update_offload_stats(struct net_device *dev, void *ptr)
{
- struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
const struct pcpu_sw_netstats *offload_stats =
(struct pcpu_sw_netstats *)ptr;
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1801,7 +1801,7 @@ nla_put_failure:
/* QCA NSS Clients Support - Start */
void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
{
- struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
const struct pcpu_sw_netstats *offload_stats =
(struct pcpu_sw_netstats *)ptr;

View File

@@ -0,0 +1,931 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -234,4 +234,17 @@ extern br_get_dst_hook_t __rcu *br_get_d
extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br);
/* QCA NSS bridge-mgr support - End */
+/* QCA qca-mcs support - Start */
+typedef struct net_bridge_port *br_get_dst_hook_t(const struct net_bridge_port *src,
+ struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+
+typedef int (br_multicast_handle_hook_t)(const struct net_bridge_port *src,
+ struct sk_buff *skb);
+extern br_multicast_handle_hook_t __rcu *br_multicast_handle_hook;
+
+typedef void (br_notify_hook_t)(int group, int event, const void *ptr);
+extern br_notify_hook_t __rcu *br_notify_hook;
+/* QCA qca-mcs support - End */
+
#endif
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -185,6 +185,7 @@ struct net_bridge_fdb_entry *br_fdb_find
{
return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
+EXPORT_SYMBOL_GPL(br_fdb_find_rcu); /* QCA qca-mcs support */
/* When a static FDB entry is added, the mac address from the entry is
* added to the bridge private HW address list and all required ports
@@ -887,6 +888,7 @@ static void fdb_notify(struct net_bridge
kfree_skb(skb);
goto errout;
}
+ __br_notify(RTNLGRP_NEIGH, type, fdb); /* QCA qca-mcs support */
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -833,6 +833,7 @@ void br_manage_promisc(struct net_bridge
int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev);
/* br_input.c */
+int br_pass_frame_up(struct sk_buff *skb); /* QCA qca-mcs support */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
rx_handler_func_t *br_get_rx_handler(const struct net_device *dev);
@@ -2105,4 +2106,14 @@ struct nd_msg *br_is_nd_neigh_msg(struct
#define __br_get(__hook, __default, __args ...) \
(__hook ? (__hook(__args)) : (__default))
/* QCA NSS ECM support - End */
+
+/* QCA qca-mcs support - Start */
+static inline void __br_notify(int group, int type, const void *data)
+{
+ br_notify_hook_t *notify_hook = rcu_dereference(br_notify_hook);
+
+ if (notify_hook)
+ notify_hook(group, type, data);
+}
+/* QCA qca-mcs support - End */
#endif
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -609,6 +609,7 @@ void br_info_notify(int event, const str
kfree_skb(skb);
goto errout;
}
+ __br_notify(RTNLGRP_LINK, event, port); /* QCA qca-mcs support */
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return;
errout:
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -455,6 +455,12 @@ static void __exit br_deinit(void)
br_fdb_fini();
}
+/* QCA qca-mcs support - Start */
+/* Hook for bridge event notifications */
+br_notify_hook_t __rcu *br_notify_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_notify_hook);
+/* QCA qca-mcs support - End */
+
module_init(br_init)
module_exit(br_deinit)
MODULE_LICENSE("GPL");
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -82,6 +82,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *
if (is_broadcast_ether_addr(dest)) {
br_flood(br, skb, BR_PKT_BROADCAST, false, true);
} else if (is_multicast_ether_addr(dest)) {
+ /* QCA qca-mcs support - Start */
+ br_multicast_handle_hook_t *multicast_handle_hook = rcu_dereference(br_multicast_handle_hook);
+ if (!__br_get(multicast_handle_hook, true, NULL, skb))
+ goto out;
+ /* QCA qca-mcs support - End */
+
if (unlikely(netpoll_tx_running(dev))) {
br_flood(br, skb, BR_PKT_MULTICAST, false, true);
goto out;
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -23,6 +23,16 @@
#include "br_private.h"
#include "br_private_tunnel.h"
+/* QCA qca-mcs support - Start */
+/* Hook for external Multicast handler */
+br_multicast_handle_hook_t __rcu *br_multicast_handle_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_multicast_handle_hook);
+
+/* Hook for external forwarding logic */
+br_get_dst_hook_t __rcu *br_get_dst_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_get_dst_hook);
+/* QCA qca-mcs support - End */
+
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
@@ -30,7 +40,7 @@ br_netif_receive_skb(struct net *net, st
return netif_receive_skb(skb);
}
-static int br_pass_frame_up(struct sk_buff *skb)
+int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
@@ -69,6 +79,7 @@ static int br_pass_frame_up(struct sk_bu
dev_net(indev), NULL, skb, indev, NULL,
br_netif_receive_skb);
}
+EXPORT_SYMBOL_GPL(br_pass_frame_up); /* QCA qca-mcs support */
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -84,6 +95,11 @@ int br_handle_frame_finish(struct net *n
struct net_bridge *br;
u16 vid = 0;
u8 state;
+ /* QCA qca-mcs support - Start */
+ br_multicast_handle_hook_t *multicast_handle_hook;
+ struct net_bridge_port *pdst = NULL;
+ br_get_dst_hook_t *get_dst_hook = rcu_dereference(br_get_dst_hook);
+ /* QCA qca-mcs support - End */
if (!p || p->state == BR_STATE_DISABLED)
goto drop;
@@ -140,6 +156,11 @@ int br_handle_frame_finish(struct net *n
switch (pkt_type) {
case BR_PKT_MULTICAST:
+ /* QCA qca-mcs support - Start */
+ multicast_handle_hook = rcu_dereference(br_multicast_handle_hook);
+ if (!__br_get(multicast_handle_hook, true, p, skb))
+ goto out;
+ /* QCA qca-mcs support - End */
mdst = br_mdb_get(brmctx, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
@@ -155,8 +176,15 @@ int br_handle_frame_finish(struct net *n
}
break;
case BR_PKT_UNICAST:
- dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
- break;
+ /* QCA qca-mcs support - Start */
+ pdst = __br_get(get_dst_hook, NULL, p, &skb);
+ if (pdst) {
+ if (!skb)
+ goto out;
+ } else {
+ /* QCA qca-mcs support - End */
+ dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
+ }
default:
break;
}
@@ -171,12 +199,19 @@ int br_handle_frame_finish(struct net *n
dst->used = now;
br_forward(dst->dst, skb, local_rcv, false);
} else {
+ /* QCA qca-mcs support - Start */
+ if (pdst) {
+ br_forward(pdst, skb, local_rcv, false);
+ goto out1;
+ }
+ /* QCA qca-mcs support - End */
+
if (!mcast_hit)
br_flood(br, skb, pkt_type, local_rcv, false);
else
br_multicast_flood(mdst, skb, brmctx, local_rcv, false);
}
-
+out1: /* QCA qca-mcs support */
if (local_rcv)
return br_pass_frame_up(skb);
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -85,4 +85,44 @@ struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
struct rtmsg *rtm, u32 portid);
+
+/* QCA ECM qca-mcs support - Start */
+#define IPMR_MFC_EVENT_UPDATE 1
+#define IPMR_MFC_EVENT_DELETE 2
+
+/*
+ * Callback to registered modules in the event of updates to a multicast group
+ */
+typedef void (*ipmr_mfc_event_offload_callback_t)(__be32 origin, __be32 group,
+ u32 max_dest_dev,
+ u32 dest_dev_idx[],
+ u8 op);
+
+/*
+ * Register the callback used to inform offload modules when updates occur to
+ * MFC. The callback is registered by offload modules
+ */
+extern bool ipmr_register_mfc_event_offload_callback(
+ ipmr_mfc_event_offload_callback_t mfc_offload_cb);
+
+/*
+ * De-Register the callback used to inform offload modules when updates occur
+ * to MFC
+ */
+extern void ipmr_unregister_mfc_event_offload_callback(void);
+
+/*
+ * Find the destination interface list, given a multicast group and source
+ */
+extern int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group,
+ u32 max_dst_cnt, u32 dest_dev[]);
+
+/*
+ * Out-of-band multicast statistics update for flows that are offloaded from
+ * Linux
+ */
+extern int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group,
+ u64 pkts_in, u64 bytes_in,
+ u64 pkts_out, u64 bytes_out);
+/* QCA ECM qca-mcs support - End */
#endif
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -110,4 +110,47 @@ static inline int ip6mr_sk_done(struct s
return 0;
}
#endif
+
+/* QCA qca-mcs support - Start */
+#define IP6MR_MFC_EVENT_UPDATE 1
+#define IP6MR_MFC_EVENT_DELETE 2
+
+/*
+ * Callback to registered modules in the event of updates to a multicast group
+ */
+typedef void (*ip6mr_mfc_event_offload_callback_t)(struct in6_addr *origin,
+ struct in6_addr *group,
+ u32 max_dest_dev,
+ u32 dest_dev_idx[],
+ uint8_t op);
+
+/*
+ * Register the callback used to inform offload modules when updates occur
+ * to MFC. The callback is registered by offload modules
+ */
+extern bool ip6mr_register_mfc_event_offload_callback(
+ ip6mr_mfc_event_offload_callback_t mfc_offload_cb);
+
+/*
+ * De-Register the callback used to inform offload modules when updates occur
+ * to MFC
+ */
+extern void ip6mr_unregister_mfc_event_offload_callback(void);
+
+/*
+ * Find the destination interface list given a multicast group and source
+ */
+extern int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u32 max_dst_cnt,
+ u32 dest_dev[]);
+
+/*
+ * Out-of-band multicast statistics update for flows that are offloaded from
+ * Linux
+ */
+extern int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, uint64_t pkts_in,
+ uint64_t bytes_in, uint64_t pkts_out,
+ uint64_t bytes_out);
+/* QCA qca-mcs support - End */
#endif
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -108,6 +108,15 @@ static void igmpmsg_netlink_event(struct
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
+/* QCA ECM qca-mcs support - Start */
+/* spinlock for offload */
+static DEFINE_SPINLOCK(lock);
+
+static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin,
+ __be32 mcastgrp);
+static ipmr_mfc_event_offload_callback_t __rcu ipmr_mfc_event_offload_callback;
+/* QCA ECM qca-mcs support - End */
+
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list, \
@@ -222,6 +231,228 @@ static int ipmr_rule_fill(struct fib_rul
return 0;
}
+/* QCA ECM qca-mcs support - Start */
+/* ipmr_sync_entry_update()
+ * Call the registered offload callback to report an update to a multicast
+ * route entry. The callback receives the list of destination interfaces and
+ * the interface count
+ */
+static void ipmr_sync_entry_update(struct mr_table *mrt,
+ struct mfc_cache *cache)
+{
+ int vifi, dest_if_count = 0;
+ u32 dest_dev[MAXVIFS];
+ __be32 origin;
+ __be32 group;
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ memset(dest_dev, 0, sizeof(dest_dev));
+
+ origin = cache->mfc_origin;
+ group = cache->mfc_mcastgrp;
+
+ read_lock(&mrt_lock);
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+ if (dest_if_count == MAXVIFS) {
+ read_unlock(&mrt_lock);
+ return;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ read_unlock(&mrt_lock);
+ return;
+ }
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ read_unlock(&mrt_lock);
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(group, origin, dest_if_count, dest_dev,
+ IPMR_MFC_EVENT_UPDATE);
+ rcu_read_unlock();
+}
+
+/* ipmr_sync_entry_delete()
+ * Call the registered offload callback to inform of a multicast route entry
+ * delete event
+ */
+static void ipmr_sync_entry_delete(__be32 origin, __be32 group)
+{
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(group, origin, 0, NULL, IPMR_MFC_EVENT_DELETE);
+ rcu_read_unlock();
+}
+
+/* ipmr_register_mfc_event_offload_callback()
+ * Register the IPv4 Multicast update offload callback with IPMR
+ */
+bool ipmr_register_mfc_event_offload_callback(
+ ipmr_mfc_event_offload_callback_t mfc_offload_cb)
+{
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ /* Check and set atomically under the lock so two concurrent
+  * registrations cannot both observe NULL and both succeed.
+  */
+ spin_lock(&lock);
+ offload_update_cb_f = rcu_dereference_protected(
+ ipmr_mfc_event_offload_callback, lockdep_is_held(&lock));
+ if (offload_update_cb_f) {
+ spin_unlock(&lock);
+ return false;
+ }
+ rcu_assign_pointer(ipmr_mfc_event_offload_callback, mfc_offload_cb);
+ spin_unlock(&lock);
+ synchronize_rcu();
+ return true;
+}
+EXPORT_SYMBOL(ipmr_register_mfc_event_offload_callback);
+
+/* ipmr_unregister_mfc_event_offload_callback()
+ * De-register the IPv4 Multicast update offload callback with IPMR
+ */
+void ipmr_unregister_mfc_event_offload_callback(void)
+{
+ spin_lock(&lock);
+ rcu_assign_pointer(ipmr_mfc_event_offload_callback, NULL);
+ spin_unlock(&lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(ipmr_unregister_mfc_event_offload_callback);
+
+/* ipmr_find_mfc_entry()
+ * Returns destination interface list for a particular multicast flow, and
+ * the number of interfaces in the list
+ */
+int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group,
+ u32 max_dest_cnt, u32 dest_dev[])
+{
+ int vifi, dest_if_count = 0;
+ struct mr_table *mrt;
+ struct mfc_cache *cache;
+
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (!mrt)
+ return -ENOENT;
+
+ rcu_read_lock();
+ cache = ipmr_cache_find(mrt, origin, group);
+ if (!cache) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ read_lock(&mrt_lock);
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ /* We have another valid destination interface entry. Check if
+ * the number of the destination interfaces for the route is
+ * exceeding the size of the array given to us
+ */
+ if (dest_if_count == max_dest_cnt) {
+ read_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ read_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ read_unlock(&mrt_lock);
+ rcu_read_unlock();
+
+ return dest_if_count;
+}
+EXPORT_SYMBOL(ipmr_find_mfc_entry);
+
+/* ipmr_mfc_stats_update()
+ * Update the MFC/VIF statistics for offloaded flows
+ */
+int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group,
+ u64 pkts_in, u64 bytes_in,
+ u64 pkts_out, u64 bytes_out)
+{
+ int vif, vifi;
+ struct mr_table *mrt;
+ struct mfc_cache *cache;
+
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (!mrt)
+ return -ENOENT;
+
+ rcu_read_lock();
+ cache = ipmr_cache_find(mrt, origin, group);
+ if (!cache) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ vif = cache->_c.mfc_parent;
+
+ read_lock(&mrt_lock);
+ if (!VIF_EXISTS(mrt, vif)) {
+ read_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ mrt->vif_table[vif].pkt_in += pkts_in;
+ mrt->vif_table[vif].bytes_in += bytes_in;
+ cache->_c.mfc_un.res.pkt += pkts_out;
+ cache->_c.mfc_un.res.bytes += bytes_out;
+
+ for (vifi = cache->_c.mfc_un.res.minvif;
+ vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if ((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255)) {
+ if (!VIF_EXISTS(mrt, vifi)) {
+ read_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ mrt->vif_table[vifi].pkt_out += pkts_out;
+ mrt->vif_table[vifi].bytes_out += bytes_out;
+ }
+ }
+ read_unlock(&mrt_lock);
+ rcu_read_unlock();
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmr_mfc_stats_update);
+/* QCA ECM qca-mcs support - End */
+
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
.family = RTNL_FAMILY_IPMR,
.rule_size = sizeof(struct ipmr_rule),
@@ -1185,6 +1416,11 @@ static int ipmr_mfc_delete(struct mr_tab
mroute_netlink_event(mrt, c, RTM_DELROUTE);
mr_cache_put(&c->_c);
+ /* QCA ECM qca-mcs support - Start */
+ /* Inform offload modules of the delete event */
+ ipmr_sync_entry_delete(c->mfc_origin, c->mfc_mcastgrp);
+ /* QCA ECM qca-mcs support - End */
+
return 0;
}
@@ -1214,6 +1450,12 @@ static int ipmr_mfc_add(struct net *net,
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
mrt->id);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
+
+ /* QCA ECM qca-mcs support - Start */
+ /* Inform offload modules of the update event */
+ ipmr_sync_entry_update(mrt, c);
+ /* QCA ECM qca-mcs support - End */
+
return 0;
}
@@ -1274,6 +1516,7 @@ static void mroute_clean_tables(struct m
struct net *net = read_pnet(&mrt->net);
struct mr_mfc *c, *tmp;
struct mfc_cache *cache;
+ __be32 origin, group; /* QCA ECM qca-mcs support */
LIST_HEAD(list);
int i;
@@ -1298,10 +1541,19 @@ static void mroute_clean_tables(struct m
rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
list_del_rcu(&c->list);
cache = (struct mfc_cache *)c;
+ /* QCA ECM qca-mcs support - Start */
+ origin = cache->mfc_origin;
+ group = cache->mfc_mcastgrp;
+ /* QCA ECM qca-mcs support - End */
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
mrt->id);
mroute_netlink_event(mrt, cache, RTM_DELROUTE);
mr_cache_put(c);
+
+ /* QCA ECM qca-mcs support - Start */
+ /* Inform offload modules of the delete event */
+ ipmr_sync_entry_delete(origin, group);
+ /* QCA ECM qca-mcs support - End */
}
}
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -95,6 +95,17 @@ static int ip6mr_rtm_dumproute(struct sk
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
+/* QCA qca-mcs support - Start */
+/* Spinlock for offload */
+static DEFINE_SPINLOCK(lock);
+
+static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
+ const struct in6_addr *origin,
+ const struct in6_addr *mcastgrp);
+static ip6mr_mfc_event_offload_callback_t __rcu
+ ip6mr_mfc_event_offload_callback;
+/* QCA qca-mcs support - End */
+
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \
@@ -380,6 +391,227 @@ static struct mr_table_ops ip6mr_mr_tabl
.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};
+/* QCA qca-mcs support - Start */
+/* ip6mr_sync_entry_update()
+ * Call the registered offload callback to report an update to a multicast
+ * route entry. The callback receives the list of destination interfaces and
+ * the interface count
+ */
+static void ip6mr_sync_entry_update(struct mr_table *mrt,
+ struct mfc6_cache *cache)
+{
+ int vifi, dest_if_count = 0;
+ u32 dest_dev[MAXMIFS];
+ struct in6_addr mc_origin, mc_group;
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ memset(dest_dev, 0, sizeof(dest_dev));
+
+ read_lock(&mrt_lock);
+
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ if (dest_if_count == MAXMIFS) {
+ read_unlock(&mrt_lock);
+ return;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ read_unlock(&mrt_lock);
+ return;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+
+ memcpy(&mc_origin, &cache->mf6c_origin, sizeof(struct in6_addr));
+ memcpy(&mc_group, &cache->mf6c_mcastgrp, sizeof(struct in6_addr));
+ read_unlock(&mrt_lock);
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(&mc_group, &mc_origin, dest_if_count, dest_dev,
+ IP6MR_MFC_EVENT_UPDATE);
+ rcu_read_unlock();
+}
+
+/* ip6mr_sync_entry_delete()
+ * Call the registered offload callback to inform of a multicast route entry
+ * delete event
+ */
+static void ip6mr_sync_entry_delete(struct in6_addr *mc_origin,
+ struct in6_addr *mc_group)
+{
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(mc_group, mc_origin, 0, NULL,
+ IP6MR_MFC_EVENT_DELETE);
+ rcu_read_unlock();
+}
+
+/* ip6mr_register_mfc_event_offload_callback()
+ * Register the IPv6 multicast update callback for offload modules
+ */
+bool ip6mr_register_mfc_event_offload_callback(
+ ip6mr_mfc_event_offload_callback_t mfc_offload_cb)
+{
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ /* Check and set atomically under the lock so two concurrent
+  * registrations cannot both observe NULL and both succeed.
+  */
+ spin_lock(&lock);
+ offload_update_cb_f = rcu_dereference_protected(
+ ip6mr_mfc_event_offload_callback, lockdep_is_held(&lock));
+ if (offload_update_cb_f) {
+ spin_unlock(&lock);
+ return false;
+ }
+ rcu_assign_pointer(ip6mr_mfc_event_offload_callback, mfc_offload_cb);
+ spin_unlock(&lock);
+ synchronize_rcu();
+ return true;
+}
+EXPORT_SYMBOL(ip6mr_register_mfc_event_offload_callback);
+
+/* ip6mr_unregister_mfc_event_offload_callback()
+ * De-register the IPv6 multicast update callback for offload modules
+ */
+void ip6mr_unregister_mfc_event_offload_callback(void)
+{
+ spin_lock(&lock);
+ rcu_assign_pointer(ip6mr_mfc_event_offload_callback, NULL);
+ spin_unlock(&lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(ip6mr_unregister_mfc_event_offload_callback);
+
+/* ip6mr_find_mfc_entry()
+ * Return the destination interface list for a particular multicast flow, and
+ * the number of interfaces in the list
+ */
+int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u32 max_dest_cnt,
+ u32 dest_dev[])
+{
+ int vifi, dest_if_count = 0;
+ struct mr_table *mrt;
+ struct mfc6_cache *cache;
+
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+ if (!mrt)
+ return -ENOENT;
+
+ read_lock(&mrt_lock);
+ cache = ip6mr_cache_find(mrt, origin, group);
+ if (!cache) {
+ read_unlock(&mrt_lock);
+ return -ENOENT;
+ }
+
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ /* We have another valid destination interface entry. Check if
+ * the number of the destination interfaces for the route is
+ * exceeding the size of the array given to us
+ */
+ if (dest_if_count == max_dest_cnt) {
+ read_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ read_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ read_unlock(&mrt_lock);
+
+ return dest_if_count;
+}
+EXPORT_SYMBOL(ip6mr_find_mfc_entry);
+
+/* ip6mr_mfc_stats_update()
+ * Update the MFC/VIF statistics for offloaded flows
+ */
+int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u64 pkts_in,
+ u64 bytes_in, u64 pkts_out,
+ u64 bytes_out)
+{
+ int vif, vifi;
+ struct mr_table *mrt;
+ struct mfc6_cache *cache;
+
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+
+ if (!mrt)
+ return -ENOENT;
+
+ read_lock(&mrt_lock);
+ cache = ip6mr_cache_find(mrt, origin, group);
+ if (!cache) {
+ read_unlock(&mrt_lock);
+ return -ENOENT;
+ }
+
+ vif = cache->_c.mfc_parent;
+
+ if (!VIF_EXISTS(mrt, vif)) {
+ read_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ mrt->vif_table[vif].pkt_in += pkts_in;
+ mrt->vif_table[vif].bytes_in += bytes_in;
+ cache->_c.mfc_un.res.pkt += pkts_out;
+ cache->_c.mfc_un.res.bytes += bytes_out;
+
+ for (vifi = cache->_c.mfc_un.res.minvif;
+ vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if ((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255)) {
+ if (!VIF_EXISTS(mrt, vifi)) {
+ read_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+ mrt->vif_table[vifi].pkt_out += pkts_out;
+ mrt->vif_table[vifi].bytes_out += bytes_out;
+ }
+ }
+
+ read_unlock(&mrt_lock);
+ return 0;
+}
+EXPORT_SYMBOL(ip6mr_mfc_stats_update);
+/* QCA qca-mcs support - End */
+
static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
struct mr_table *mrt;
@@ -1215,6 +1447,7 @@ static int ip6mr_mfc_delete(struct mr_ta
int parent)
{
struct mfc6_cache *c;
+ struct in6_addr mc_origin, mc_group; /* QCA qca-mcs support */
/* The entries are added/deleted only under RTNL */
rcu_read_lock();
@@ -1223,6 +1456,12 @@ static int ip6mr_mfc_delete(struct mr_ta
rcu_read_unlock();
if (!c)
return -ENOENT;
+
+ /* QCA qca-mcs support - Start */
+ memcpy(&mc_origin, &c->mf6c_origin, sizeof(struct in6_addr));
+ memcpy(&mc_group, &c->mf6c_mcastgrp, sizeof(struct in6_addr));
+ /* QCA qca-mcs support - End */
+
rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
list_del_rcu(&c->_c.list);
@@ -1230,6 +1469,12 @@ static int ip6mr_mfc_delete(struct mr_ta
FIB_EVENT_ENTRY_DEL, c, mrt->id);
mr6_netlink_event(mrt, c, RTM_DELROUTE);
mr_cache_put(&c->_c);
+
+ /* QCA qca-mcs support - Start */
+ /* Inform offload modules of the delete event */
+ ip6mr_sync_entry_delete(&mc_origin, &mc_group);
+ /* QCA qca-mcs support - End */
+
return 0;
}
@@ -1439,6 +1684,12 @@ static int ip6mr_mfc_add(struct net *net
call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
c, mrt->id);
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
+
+ /* QCA qca-mcs support - Start */
+ /* Inform offload modules of the update event */
+ ip6mr_sync_entry_update(mrt, c);
+ /* QCA qca-mcs support - End */
+
return 0;
}
@@ -1501,6 +1752,10 @@ static int ip6mr_mfc_add(struct net *net
static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
+ /* QCA qca-mcs support - Start */
+ struct mfc6_cache *cache;
+ struct in6_addr mc_origin, mc_group;
+ /* QCA qca-mcs support - End */
struct mr_mfc *c, *tmp;
LIST_HEAD(list);
int i;
@@ -1523,13 +1778,23 @@ static void mroute_clean_tables(struct m
if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
(!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
continue;
+ /* QCA qca-mcs support - Start */
+ cache = (struct mfc6_cache *)c;
+ memcpy(&mc_origin, &cache->mf6c_origin, sizeof(struct in6_addr));
+ memcpy(&mc_group, &cache->mf6c_mcastgrp, sizeof(struct in6_addr));
+ /* QCA qca-mcs support - End */
rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
list_del_rcu(&c->list);
call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
FIB_EVENT_ENTRY_DEL,
- (struct mfc6_cache *)c, mrt->id);
- mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
+ cache, mrt->id);
+ mr6_netlink_event(mrt, cache, RTM_DELROUTE);
mr_cache_put(c);
+
+ /* QCA qca-mcs support - Start */
+ /* Inform offload modules of the delete event */
+ ip6mr_sync_entry_delete(&mc_origin, &mc_group);
+ /* QCA qca-mcs support - End */
}
}

View File

@@ -0,0 +1,111 @@
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -417,6 +417,8 @@ static int crypto_authenc_create(struct
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
+ inst->alg.base.cra_flags |= (auth_base->cra_flags |
+ enc->base.cra_flags) & CRYPTO_ALG_NOSUPP_SG;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -101,6 +101,11 @@
#define CRYPTO_NOLOAD 0x00008000
/*
+ * Set this flag if algorithm does not support SG list transforms
+ */
+#define CRYPTO_ALG_NOSUPP_SG 0x00020000
+
+/*
* The algorithm may allocate memory during request processing, i.e. during
* encryption, decryption, or hashing. Users can request an algorithm with this
* flag unset if they can't handle memory allocation failures.
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -659,6 +659,7 @@ static int esp_output(struct xfrm_state
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct esp_info esp;
+ bool nosupp_sg;
esp.inplace = true;
@@ -670,6 +671,11 @@ static int esp_output(struct xfrm_state
aead = x->data;
alen = crypto_aead_authsize(aead);
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ return -ENOMEM;
+ }
+
esp.tfclen = 0;
if (x->tfcpad) {
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -897,6 +903,7 @@ static int esp_input(struct xfrm_state *
u8 *iv;
struct scatterlist *sg;
int err = -EINVAL;
+ bool nosupp_sg;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
goto out;
@@ -904,6 +911,12 @@ static int esp_input(struct xfrm_state *
if (elen <= 0)
goto out;
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
assoclen = sizeof(struct ip_esp_hdr);
seqhilen = 0;
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -696,6 +696,7 @@ static int esp6_output(struct xfrm_state
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct esp_info esp;
+ bool nosupp_sg;
esp.inplace = true;
@@ -707,6 +708,11 @@ static int esp6_output(struct xfrm_state
aead = x->data;
alen = crypto_aead_authsize(aead);
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ return -ENOMEM;
+ }
+
esp.tfclen = 0;
if (x->tfcpad) {
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -940,6 +946,7 @@ static int esp6_input(struct xfrm_state
__be32 *seqhi;
u8 *iv;
struct scatterlist *sg;
+ bool nosupp_sg;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
ret = -EINVAL;
@@ -951,6 +958,12 @@ static int esp6_input(struct xfrm_state
goto out;
}
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
assoclen = sizeof(struct ip_esp_hdr);
seqhilen = 0;

View File

@@ -0,0 +1,24 @@
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -1523,7 +1523,6 @@
qcom,load-addr = <0x40000000>;
qcom,turbo-frequency;
- qcom,bridge-enabled;
qcom,gre-enabled;
qcom,gre-redir-enabled;
qcom,gre_tunnel_enabled;
@@ -1542,12 +1541,10 @@
qcom,vlan-enabled;
qcom,wlan-dataplane-offload-enabled;
qcom,wlanredirect-enabled;
- qcom,pxvlan-enabled;
qcom,vxlan-enabled;
qcom,match-enabled;
qcom,mirror-enabled;
- qcom,rmnet-enabled;
- qcom,clmap-enabled;
+ qcom,tstamp-enabled;
};
nss1: nss@40800000 {

View File

@@ -0,0 +1,160 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -197,7 +197,6 @@ extern struct net_device *br_port_dev_ge
unsigned char *addr,
struct sk_buff *skb,
unsigned int cookie);
-extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr);
extern void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid);
extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
const char *addr,
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -1460,26 +1460,6 @@ void br_fdb_clear_offload(const struct n
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
/* QCA NSS ECM support - Start */
-/* Refresh FDB entries for bridge packets being forwarded by offload engines */
-void br_refresh_fdb_entry(struct net_device *dev, const char *addr)
-{
- struct net_bridge_port *p = br_port_get_rcu(dev);
-
- if (!p || p->state == BR_STATE_DISABLED)
- return;
-
- if (!is_valid_ether_addr(addr)) {
- pr_info("bridge: Attempt to refresh with invalid ether address %pM\n",
- addr);
- return;
- }
-
- rcu_read_lock();
- br_fdb_update(p->br, p, addr, 0, true);
- rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(br_refresh_fdb_entry);
-
/* Update timestamp of FDB entries for bridge packets being forwarded by offload engines */
void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid)
{
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3800,34 +3800,6 @@ int ppp_channel_get_proto_version(struct
}
EXPORT_SYMBOL(ppp_channel_get_proto_version);
-/* ppp_channel_hold()
- * Call this to hold a channel.
- *
- * Returns true on success or false if the hold could not happen.
- *
- * NOTE: chan must be protected against destruction during this call -
- * either by correct locking etc. or because you already have an implicit
- * or explicit hold to the channel already and this is an additional hold.
- */
-bool ppp_channel_hold(struct ppp_channel *chan)
-{
- if (!chan->ops->hold)
- return false;
-
- chan->ops->hold(chan);
- return true;
-}
-EXPORT_SYMBOL(ppp_channel_hold);
-
-/* ppp_channel_release()
- * Call this to release a hold you have upon a channel
- */
-void ppp_channel_release(struct ppp_channel *chan)
-{
- chan->ops->release(chan);
-}
-EXPORT_SYMBOL(ppp_channel_release);
-
/* Check if ppp xmit lock is on hold */
bool ppp_is_xmit_locked(struct net_device *dev)
{
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -144,12 +144,6 @@ extern int ppp_channel_get_protocol(stru
/* Call this get protocol version */
extern int ppp_channel_get_proto_version(struct ppp_channel *);
-/* Call this to hold a channel */
-extern bool ppp_channel_hold(struct ppp_channel *);
-
-/* Call this to release a hold you have upon a channel */
-extern void ppp_channel_release(struct ppp_channel *);
-
/* Release hold on PPP channels */
extern void ppp_release_channels(struct ppp_channel *channels[],
unsigned int chan_sz);
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -93,32 +93,6 @@ static int lookup_chan_dst(u16 call_id,
return i < MAX_CALLID;
}
-/* Search a pptp session based on local call id, local and remote ip address */
-static int lookup_session_src(struct pptp_opt *opt, u16 call_id, __be32 daddr, __be32 saddr)
-{
- struct pppox_sock *sock;
- int i = 1;
-
- rcu_read_lock();
- for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
- sock = rcu_dereference(callid_sock[i]);
- if (!sock)
- continue;
-
- if (sock->proto.pptp.src_addr.call_id == call_id &&
- sock->proto.pptp.dst_addr.sin_addr.s_addr == daddr &&
- sock->proto.pptp.src_addr.sin_addr.s_addr == saddr) {
- sock_hold(sk_pppox(sock));
- memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
- sock_put(sk_pppox(sock));
- rcu_read_unlock();
- return 0;
- }
- }
- rcu_read_unlock();
- return -EINVAL;
-}
-
/* Search a pptp session based on peer call id and peer ip address */
static int lookup_session_dst(struct pptp_opt *opt, u16 call_id, __be32 d_addr)
{
@@ -758,20 +732,6 @@ int pptp_session_find(struct pptp_opt *o
}
EXPORT_SYMBOL(pptp_session_find);
-/* pptp_session_find_by_src_callid()
- * Search and return a PPTP session info based on src callid and IP
- * address. The function accepts the parameters in network byte order.
- */
-int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
- __be32 daddr, __be32 saddr)
-{
- if (!opt)
- return -EINVAL;
-
- return lookup_session_src(opt, ntohs(src_call_id), daddr, saddr);
-}
-EXPORT_SYMBOL(pptp_session_find_by_src_callid);
-
/* Function to change the offload mode true/false for a PPTP session */
static int pptp_set_offload_mode(bool accel_mode,
__be16 peer_call_id, __be32 peer_ip_addr)
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -111,10 +111,6 @@ typedef int (*pptp_gre_seq_offload_callb
extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
struct pppoe_opt *addressing);
-/* Lookup PPTP session info and return PPTP session using sip, dip and local call id */
-extern int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
- __be32 daddr, __be32 saddr);
-
/* Lookup PPTP session info and return PPTP session using dip and peer call id */
extern int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
__be32 peer_ip_addr);