ipq806x: NSS Hardware Offloading Target Core and ECM patches

ACwifidude
2022-10-13 08:25:59 -05:00
committed by Lucas Asvio
parent 55bee42e28
commit 4cabdd199b
9 changed files with 2249 additions and 0 deletions


@@ -0,0 +1,215 @@
From c70758d96b22e4421a6afd824cb59e350c6a8040 Mon Sep 17 00:00:00 2001
From: Robert Marko <robert.marko@sartura.hr>
Date: Tue, 2 Jun 2020 22:09:15 +0200
Subject: [PATCH] Regulator: Add NSS VOLT
Signed-off-by: Robert Marko <robert.marko@sartura.hr>
---
drivers/regulator/Kconfig | 7 +++++++
drivers/regulator/Makefile | 1 +
2 files changed, 8 insertions(+)
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1423,5 +1423,12 @@ config REGULATOR_QCOM_LABIBB
boost regulator and IBB can be used as a negative boost regulator
for LCD display panel.
+config REGULATOR_NSS_VOLT
+ bool "Qualcomm IPQ806X NSS Voltage regulator"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ This driver provides support for the Qualcomm IPQ806X NSS Voltage
+ regulator.
+
endif
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -171,5 +171,6 @@ obj-$(CONFIG_REGULATOR_WM831X) += wm831x
obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
+obj-$(CONFIG_REGULATOR_NSS_VOLT) += nss-volt-ipq806x.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
--- /dev/null
+++ b/drivers/regulator/nss-volt-ipq806x.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/nss-volt-ipq806x.h>
+
+struct nss_data {
+ struct regulator *nss_reg;
+ u32 nss_core_vdd_nominal;
+ u32 nss_core_vdd_high;
+ u32 nss_core_threshold_freq;
+};
+
+static struct nss_data *data;
+
+int nss_ramp_voltage(unsigned long rate, bool ramp_up)
+{
+ int ret;
+ int curr_uV, uV;
+ struct regulator *reg;
+
+ if (!data) {
+ pr_err("NSS core regulator not init.\n");
+ return -ENODEV;
+ }
+
+ reg = data->nss_reg;
+
+ if (!reg) {
+ pr_err("NSS core regulator not found.\n");
+ return -EINVAL;
+ }
+
+ uV = data->nss_core_vdd_nominal;
+ if (rate >= data->nss_core_threshold_freq)
+ uV = data->nss_core_vdd_high;
+
+ curr_uV = regulator_get_voltage(reg);
+
+ if (ramp_up) {
+ if (uV <= curr_uV)
+ return 0;
+ } else {
+ if (uV >= curr_uV)
+ return 0;
+ }
+
+ ret = regulator_set_voltage(reg, uV, data->nss_core_vdd_high);
+ if (ret)
+ pr_err("NSS volt scaling failed (%d)\n", uV);
+
+ return ret;
+}
+
+static const struct of_device_id nss_ipq806x_match_table[] = {
+ { .compatible = "qcom,nss-common" },
+ {}
+};
+
+static int nss_volt_ipq806x_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->nss_reg = devm_regulator_get(&pdev->dev, "nss_core");
+ ret = PTR_ERR_OR_ZERO(data->nss_reg);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev,
+ "nss_core regulator not ready, retry\n");
+ else
+ dev_err(&pdev->dev, "no regulator for nss_core: %d\n",
+ ret);
+
+ return ret;
+ }
+
+ if (of_property_read_u32(np, "nss_core_vdd_nominal",
+ &data->nss_core_vdd_nominal)) {
+ pr_warn("NSS core vdd nominal not found. Using defaults...\n");
+ data->nss_core_vdd_nominal = 1100000;
+ }
+
+ if (of_property_read_u32(np, "nss_core_vdd_high",
+ &data->nss_core_vdd_high)) {
+ pr_warn("NSS core vdd high not found. Using defaults...\n");
+ data->nss_core_vdd_high = 1150000;
+ }
+
+ if (of_property_read_u32(np, "nss_core_threshold_freq",
+ &data->nss_core_threshold_freq)) {
+ pr_warn("NSS core thres freq not found. Using defaults...\n");
+ data->nss_core_threshold_freq = 733000000;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static struct platform_driver nss_ipq806x_driver = {
+ .probe = nss_volt_ipq806x_probe,
+ .driver = {
+ .name = "nss-volt-ipq806x",
+ .owner = THIS_MODULE,
+ .of_match_table = nss_ipq806x_match_table,
+ },
+};
+
+static int __init nss_ipq806x_init(void)
+{
+ return platform_driver_register(&nss_ipq806x_driver);
+}
+late_initcall(nss_ipq806x_init);
+
+static void __exit nss_ipq806x_exit(void)
+{
+ platform_driver_unregister(&nss_ipq806x_driver);
+}
+module_exit(nss_ipq806x_exit);
+
--- /dev/null
+++ b/include/linux/regulator/nss-volt-ipq806x.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __QCOM_NSS_VOL_SCALING_H
+#define __QCOM_NSS_VOL_SCALING_H
+
+#include <linux/regulator/consumer.h>
+
+int nss_ramp_voltage(unsigned long rate, bool ramp_up);
+
+#endif


@@ -0,0 +1,208 @@
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -283,6 +283,7 @@
#define EBI2_AON_CLK 281
#define NSSTCM_CLK_SRC 282
#define NSSTCM_CLK 283
+#define NSS_CORE_CLK 284 /* Virtual */
#define CE5_A_CLK_SRC 285
#define CE5_H_CLK_SRC 286
#define CE5_CORE_CLK_SRC 287
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -24,6 +24,10 @@
#include "clk-branch.h"
#include "clk-hfpll.h"
#include "reset.h"
+#include <linux/regulator/nss-volt-ipq806x.h>
+
+/* NSS safe parent index which will be used during NSS PLL rate change */
+static int gcc_ipq806x_nss_safe_parent;
static struct clk_pll pll0 = {
.l_reg = 0x30c4,
@@ -2995,6 +2999,139 @@ static struct clk_branch ce5_h_clk = {
},
};
+static int nss_core_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int ret;
+
+ /*
+ * When ramping up voltage, it needs to be done first. This ensures that
+ * the voltage required will be available when you step up the frequency.
+ */
+ ret = nss_ramp_voltage(rate, true);
+ if (ret)
+ return ret;
+
+ ret = clk_dyn_rcg_ops.set_rate(&ubi32_core1_src_clk.clkr.hw, rate,
+ parent_rate);
+ if (ret)
+ return ret;
+
+ ret = clk_dyn_rcg_ops.set_rate(&ubi32_core2_src_clk.clkr.hw, rate,
+ parent_rate);
+
+ if (ret)
+ return ret;
+
+ /*
+ * When ramping down, the voltage needs to be set last. This ensures
+ * that the voltage required remains available until you have stepped
+ * down the frequency.
+ */
+ ret = nss_ramp_voltage(rate, false);
+
+ return ret;
+}
+
+static int
+nss_core_clk_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ int ret;
+
+ /*
+ * When ramping up, the voltage needs to be raised first. This ensures that
+ * the voltage required will be available when you step up the
+ * frequency.
+ */
+ ret = nss_ramp_voltage(rate, true);
+ if (ret)
+ return ret;
+
+ ret = clk_dyn_rcg_ops.set_rate_and_parent(
+ &ubi32_core1_src_clk.clkr.hw, rate, parent_rate, index);
+ if (ret)
+ return ret;
+
+ ret = clk_dyn_rcg_ops.set_rate_and_parent(
+ &ubi32_core2_src_clk.clkr.hw, rate, parent_rate, index);
+
+ if (ret)
+ return ret;
+
+ /*
+ * When ramping down, the voltage needs to be lowered last. This ensures that
+ * the voltage required will be available when you step down the
+ * frequency.
+ */
+ ret = nss_ramp_voltage(rate, false);
+
+ return ret;
+}
+
+static int nss_core_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_dyn_rcg_ops.determine_rate(&ubi32_core1_src_clk.clkr.hw,
+ req);
+}
+
+static unsigned long
+nss_core_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ return clk_dyn_rcg_ops.recalc_rate(&ubi32_core1_src_clk.clkr.hw,
+ parent_rate);
+}
+
+static u8 nss_core_clk_get_parent(struct clk_hw *hw)
+{
+ return clk_dyn_rcg_ops.get_parent(&ubi32_core1_src_clk.clkr.hw);
+}
+
+static int nss_core_clk_set_parent(struct clk_hw *hw, u8 i)
+{
+ int ret;
+ struct clk_dyn_rcg *rcg;
+ struct freq_tbl f = { 200000000, P_PLL0, 2, 1, 2 };
+
+ /* P_PLL0 is 800 MHz and must be divided down to 200 MHz */
+ if (i == gcc_ipq806x_nss_safe_parent) {
+ rcg = to_clk_dyn_rcg(&ubi32_core1_src_clk.clkr.hw);
+ clk_dyn_configure_bank(rcg, &f);
+
+ rcg = to_clk_dyn_rcg(&ubi32_core2_src_clk.clkr.hw);
+ clk_dyn_configure_bank(rcg, &f);
+
+ return 0;
+ }
+
+ ret = clk_dyn_rcg_ops.set_parent(&ubi32_core1_src_clk.clkr.hw, i);
+ if (ret)
+ return ret;
+
+ return clk_dyn_rcg_ops.set_parent(&ubi32_core2_src_clk.clkr.hw, i);
+}
+
+static const struct clk_ops clk_ops_nss_core = {
+ .set_rate = nss_core_clk_set_rate,
+ .set_rate_and_parent = nss_core_clk_set_rate_and_parent,
+ .determine_rate = nss_core_clk_determine_rate,
+ .recalc_rate = nss_core_clk_recalc_rate,
+ .get_parent = nss_core_clk_get_parent,
+ .set_parent = nss_core_clk_set_parent,
+};
+
+/* Virtual clock for nss core clocks */
+static struct clk_regmap nss_core_clk = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_core_clk",
+ .ops = &clk_ops_nss_core,
+ .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap *gcc_ipq806x_clks[] = {
[PLL0] = &pll0.clkr,
[PLL0_VOTE] = &pll0_vote,
@@ -3114,6 +3251,7 @@ static struct clk_regmap *gcc_ipq806x_cl
[UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
[NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
[NSSTCM_CLK] = &nss_tcm_clk.clkr,
+ [NSS_CORE_CLK] = &nss_core_clk,
[PLL9] = &hfpll0.clkr,
[PLL10] = &hfpll1.clkr,
[PLL12] = &hfpll_l2.clkr,
@@ -3334,6 +3472,12 @@ static int gcc_ipq806x_probe(struct plat
if (!regmap)
return -ENODEV;
+ gcc_ipq806x_nss_safe_parent = qcom_find_src_index(&nss_core_clk.hw,
+ gcc_pxo_pll8_pll14_pll18_pll0_map,
+ P_PLL0);
+ if (gcc_ipq806x_nss_safe_parent < 0)
+ return gcc_ipq806x_nss_safe_parent;
+
/* Setup PLL18 static bits */
regmap_update_bits(regmap, 0x31a4, 0xffffffc0, 0x40000400);
regmap_write(regmap, 0x31b0, 0x3080);
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -805,6 +805,11 @@ static int clk_dyn_rcg_set_rate_and_pare
return __clk_dyn_rcg_set_rate(hw, rate);
}
+void clk_dyn_configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
+{
+ configure_bank(rcg, f);
+}
+
const struct clk_ops clk_rcg_ops = {
.enable = clk_enable_regmap,
.disable = clk_disable_regmap,
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -183,4 +183,7 @@ struct clk_rcg_dfs_data {
extern int qcom_cc_register_rcg_dfs(struct regmap *regmap,
const struct clk_rcg_dfs_data *rcgs,
size_t len);
+
+extern void clk_dyn_configure_bank(struct clk_dyn_rcg *rcg,
+ const struct freq_tbl *f);
#endif
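
With the virtual nss_core_clk registered, a consumer never programs the two UBI32 RCGs or the NSS regulator directly: it sets a rate on the one exported clock and clk_ops_nss_core handles the voltage ordering and the second core. A minimal consumer sketch, not part of this patch; the devm_clk_get() lookup name and the device handle are hypothetical placeholders (a real consumer would normally reference NSS_CORE_CLK through its devicetree clocks property):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch only: how an NSS driver might drive the virtual clock added above. */
static int nss_set_core_freq(struct device *dev, unsigned long rate)
{
	struct clk *nss_core;

	nss_core = devm_clk_get(dev, "nss_core_clk");	/* assumed lookup name */
	if (IS_ERR(nss_core))
		return PTR_ERR(nss_core);

	/*
	 * clk_ops_nss_core ramps the NSS voltage up before raising the rate,
	 * drops it after lowering the rate, and programs both UBI32 RCGs.
	 */
	return clk_set_rate(nss_core, rate);	/* e.g. 733000000 or above */
}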


@@ -0,0 +1,11 @@
--- a/net/core/of_net.c
+++ b/net/core/of_net.c
@@ -39,7 +39,7 @@ int of_get_phy_mode(struct device_node *
for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
if (!strcasecmp(pm, phy_modes(i))) {
*interface = i;
- return 0;
+ return i;
}
return -ENODEV;


@@ -0,0 +1,759 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -69,6 +69,7 @@ void brioctl_set(int (*hook)(struct net
void __user *uarg));
int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
struct ifreq *ifr, void __user *uarg);
+extern bool br_is_hairpin_enabled(struct net_device *dev);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
@@ -192,4 +193,42 @@ static inline clock_t br_get_ageing_time
}
#endif
+/* QCA NSS ECM support - Start */
+extern struct net_device *br_port_dev_get(struct net_device *dev,
+ unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie);
+extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr);
+extern void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid);
+extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr,
+ __u16 vid);
+extern void br_fdb_update_register_notify(struct notifier_block *nb);
+extern void br_fdb_update_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_port_dev_get_hook_t(struct net_device *dev,
+ struct sk_buff *skb,
+ unsigned char *addr,
+ unsigned int cookie);
+extern br_port_dev_get_hook_t __rcu *br_port_dev_get_hook;
+
+#define BR_FDB_EVENT_ADD 0x01
+#define BR_FDB_EVENT_DEL 0x02
+
+struct br_fdb_event {
+ struct net_device *dev;
+ unsigned char addr[6];
+ unsigned char is_local;
+ struct net_bridge *br;
+ struct net_device *orig_dev;
+};
+extern void br_fdb_register_notify(struct notifier_block *nb);
+extern void br_fdb_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_get_dst_hook_t(
+ const struct net_bridge_port *src,
+ struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+/* QCA NSS ECM support - End */
+
#endif
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -222,7 +222,12 @@ extern void vlan_vids_del_by_dev(struct
extern bool vlan_uses_dev(const struct net_device *dev);
+/* QCA NSS ECM support - Start */
+extern struct net_device *vlan_dev_next_dev(const struct net_device *dev);
+/* QCA NSS ECM support - End */
+
#else
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1713,6 +1713,27 @@ enum netdev_ml_priv_type {
ML_PRIV_CAN,
};
+/* QCA NSS ECM support - Start */
+enum netdev_priv_flags_ext {
+ IFF_EXT_TUN_TAP = 1<<0,
+ IFF_EXT_PPP_L2TPV2 = 1<<1,
+ IFF_EXT_PPP_L2TPV3 = 1<<2,
+ IFF_EXT_PPP_PPTP = 1<<3,
+ IFF_EXT_GRE_V4_TAP = 1<<4,
+ IFF_EXT_GRE_V6_TAP = 1<<5,
+ IFF_EXT_IFB = 1<<6,
+};
+
+#define IFF_EXT_TUN_TAP IFF_EXT_TUN_TAP
+#define IFF_EXT_PPP_L2TPV2 IFF_EXT_PPP_L2TPV2
+#define IFF_EXT_PPP_L2TPV3 IFF_EXT_PPP_L2TPV3
+#define IFF_EXT_PPP_PPTP IFF_EXT_PPP_PPTP
+#define IFF_EXT_GRE_V4_TAP IFF_EXT_GRE_V4_TAP
+#define IFF_EXT_GRE_V6_TAP IFF_EXT_GRE_V6_TAP
+#define IFF_EXT_IFB IFF_EXT_IFB
+/* QCA NSS ECM support - End */
+
+
/**
* struct net_device - The DEVICE structure.
*
@@ -2848,6 +2869,10 @@ enum netdev_cmd {
NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO,
+ /* QCA NSS ECM Support - Start */
+ NETDEV_BR_JOIN,
+ NETDEV_BR_LEAVE,
+ /* QCA NSS ECM Support - End */
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -211,6 +211,11 @@ void rt6_multipath_rebalance(struct fib6
void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb);
+int rt6_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -574,4 +574,15 @@ static inline void neigh_update_is_route
*notify = 1;
}
}
+
+/* QCA NSS ECM support - Start */
+struct neigh_mac_update {
+ unsigned char old_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+ unsigned char update_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+};
+
+extern void neigh_mac_update_register_notify(struct notifier_block *nb);
+extern void neigh_mac_update_unregister_notify(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
#endif
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -234,6 +234,11 @@ struct rtable *rt_dst_alloc(struct net_d
bool nopolicy, bool noxfrm);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb);
+int ip_rt_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -556,4 +556,12 @@ static int __init vlan_offload_init(void
return 0;
}
+/* QCA NSS ECM support - Start */
+struct net_device *vlan_dev_next_dev(const struct net_device *dev)
+{
+ return vlan_dev_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_next_dev);
+/* QCA NSS ECM support - End */
+
fs_initcall(vlan_offload_init);
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -38,6 +38,35 @@ static int fdb_insert(struct net_bridge
static void fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *, int, bool);
+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(br_fdb_notifier_list);
+ATOMIC_NOTIFIER_HEAD(br_fdb_update_notifier_list);
+
+void br_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_register_notify);
+
+void br_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_unregister_notify);
+
+void br_fdb_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_register_notify);
+
+void br_fdb_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -343,6 +372,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
+ u8 mac_addr[6]; /* QCA NSS ECM support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -369,8 +399,15 @@ void br_fdb_cleanup(struct work_struct *
work_delay = min(work_delay, this_timer - now);
} else {
spin_lock_bh(&br->hash_lock);
- if (!hlist_unhashed(&f->fdb_node))
+ if (!hlist_unhashed(&f->fdb_node)) {
+ ether_addr_copy(mac_addr, f->key.addr.addr);
fdb_delete(br, f, true);
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list, 0,
+ (void *)mac_addr);
+ /* QCA NSS ECM support - End */
+ }
spin_unlock_bh(&br->hash_lock);
}
}
@@ -618,6 +655,12 @@ void br_fdb_update(struct net_bridge *br
&fdb->flags)))
clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
&fdb->flags);
+
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list,
+ 0, (void *)addr);
+ /* QCA NSS ECM support - End */
}
if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
@@ -799,6 +842,25 @@ static void fdb_notify(struct net_bridge
br_offload_fdb_update(fdb);
+ /* QCA NSS ECM support - Start */
+ if (fdb->dst) {
+ int event;
+ struct br_fdb_event fdb_event;
+
+ if (type == RTM_NEWNEIGH)
+ event = BR_FDB_EVENT_ADD;
+ else
+ event = BR_FDB_EVENT_DEL;
+
+ fdb_event.dev = fdb->dst->dev;
+ ether_addr_copy(fdb_event.addr, fdb->key.addr.addr);
+ fdb_event.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
+ atomic_notifier_call_chain(&br_fdb_notifier_list,
+ event,
+ (void *)&fdb_event);
+ }
+ /* QCA NSS ECM support - End */
+
if (swdev_notify)
br_switchdev_fdb_notify(br, fdb, type);
@@ -1382,3 +1444,62 @@ void br_fdb_clear_offload(const struct n
spin_unlock_bh(&p->br->hash_lock);
}
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
+
+/* QCA NSS ECM support - Start */
+/* Refresh FDB entries for bridge packets being forwarded by offload engines */
+void br_refresh_fdb_entry(struct net_device *dev, const char *addr)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ if (!is_valid_ether_addr(addr)) {
+ pr_info("bridge: Attempt to refresh with invalid ether address %pM\n",
+ addr);
+ return;
+ }
+
+ rcu_read_lock();
+ br_fdb_update(p->br, p, addr, 0, true);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_refresh_fdb_entry);
+
+/* Update timestamp of FDB entries for bridge packets being forwarded by offload engines */
+void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ if (likely(fdb)) {
+ fdb->updated = jiffies;
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_fdb_entry_refresh);
+
+/* Look up the MAC address in the device's bridge fdb table */
+struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr, __u16 vid)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+ struct net_bridge_fdb_entry *fdb;
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return NULL;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ rcu_read_unlock();
+
+ return fdb;
+}
+EXPORT_SYMBOL_GPL(br_fdb_has_entry);
+/* QCA NSS ECM support - End */
+
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -18,6 +18,7 @@
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
+#include <linux/version.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
@@ -27,6 +28,12 @@
#include "br_private.h"
#include "br_private_offload.h"
+/* QCA NSS ECM support - Start */
+/* Hook for external forwarding logic */
+br_port_dev_get_hook_t __rcu *br_port_dev_get_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_port_dev_get_hook);
+/* QCA NSS ECM support - End */
+
/*
* Determine initial path cost based on speed.
* using recommendations from 802.1d standard
@@ -707,6 +714,8 @@ int br_add_if(struct net_bridge *br, str
kobject_uevent(&p->kobj, KOBJ_ADD);
+ call_netdevice_notifiers(NETDEV_BR_JOIN, dev); /* QCA NSS ECM support */
+
return 0;
err6:
@@ -742,6 +751,8 @@ int br_del_if(struct net_bridge *br, str
if (!p || p->br != br)
return -EINVAL;
+ call_netdevice_notifiers(NETDEV_BR_LEAVE, dev); /* QCA NSS ECM support */
+
/* Since more than one interface can be attached to a bridge,
* there still maybe an alternate path for netconsole to use;
* therefore there is no reason for a NETDEV_RELEASE event.
@@ -788,3 +799,75 @@ bool br_port_flag_is_set(const struct ne
return p->flags & flag;
}
EXPORT_SYMBOL_GPL(br_port_flag_is_set);
+
+/* QCA NSS ECM support - Start */
+/* API to know if hairpin feature is enabled/disabled on this bridge port */
+bool br_is_hairpin_enabled(struct net_device *dev)
+{
+ struct net_bridge_port *port = br_port_get_check_rcu(dev);
+
+ if (likely(port))
+ return port->flags & BR_HAIRPIN_MODE;
+ return false;
+}
+EXPORT_SYMBOL_GPL(br_is_hairpin_enabled);
+
+/* br_port_dev_get()
+ * If a skb is provided, and the br_port_dev_get_hook_t hook exists,
+ * use that to try and determine the egress port for that skb.
+ * If not, or no egress port could be determined, use the given addr
+ * to identify the port to which it is reachable,
+ * returing a reference to the net device associated with that port.
+ *
+ * NOTE: Return NULL if given dev is not a bridge or the mac has no
+ * associated port.
+ */
+struct net_device *br_port_dev_get(struct net_device *dev, unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie)
+{
+ struct net_bridge_fdb_entry *fdbe;
+ struct net_bridge *br;
+ struct net_device *netdev = NULL;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return NULL;
+
+ rcu_read_lock();
+
+ /* If the hook exists and the skb isn't NULL, try and get the port */
+ if (skb) {
+ br_port_dev_get_hook_t *port_dev_get_hook;
+
+ port_dev_get_hook = rcu_dereference(br_port_dev_get_hook);
+ if (port_dev_get_hook) {
+ struct net_bridge_port *pdst =
+ __br_get(port_dev_get_hook, NULL, dev, skb,
+ addr, cookie);
+ if (pdst) {
+ dev_hold(pdst->dev);
+ netdev = pdst->dev;
+ goto out;
+ }
+ }
+ }
+
+ /* Either there is no hook, or it could not
+ * determine the port to use - fall back to using the FDB
+ */
+
+ br = netdev_priv(dev);
+
+ /* Lookup the fdb entry and get reference to the port dev */
+ fdbe = br_fdb_find_rcu(br, addr, 0);
+ if (fdbe && fdbe->dst) {
+ netdev = fdbe->dst->dev; /* port device */
+ dev_hold(netdev);
+ }
+out:
+ rcu_read_unlock();
+ return netdev;
+}
+EXPORT_SYMBOL_GPL(br_port_dev_get);
+/* QCA NSS ECM support - End */
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -2120,4 +2120,9 @@ void br_do_proxy_suppress_arp(struct sk_
void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
+
+/* QCA NSS ECM support - Start */
+#define __br_get(__hook, __default, __args ...) \
+ (__hook ? (__hook(__args)) : (__default))
+/* QCA NSS ECM support - End */
#endif
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1685,7 +1685,7 @@ const char *netdev_cmd_to_name(enum netd
N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
- N(PRE_CHANGEADDR)
+ N(PRE_CHANGEADDR) N(BR_JOIN) N(BR_LEAVE)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1219,7 +1219,21 @@ static void neigh_update_hhs(struct neig
}
}
+/* QCA NSS ECM support - start */
+ATOMIC_NOTIFIER_HEAD(neigh_mac_update_notifier_list);
+void neigh_mac_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_register_notify);
+
+void neigh_mac_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_unregister_notify);
+/* QCA NSS ECM support - End */
/* Generic update routine.
-- lladdr is new lladdr or NULL, if it is not supplied.
@@ -1250,6 +1264,7 @@ static int __neigh_update(struct neighbo
int notify = 0;
struct net_device *dev;
int update_isrouter = 0;
+ struct neigh_mac_update nmu; /* QCA NSS ECM support */
trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
@@ -1264,6 +1279,8 @@ static int __neigh_update(struct neighbo
new = old;
goto out;
}
+ memset(&nmu, 0, sizeof(struct neigh_mac_update)); /* QCA NSS ECM support */
+
if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
@@ -1301,6 +1318,11 @@ static int __neigh_update(struct neighbo
- compare new & old
- if they are different, check override flag
*/
+ /* QCA NSS ECM update - Start */
+ memcpy(nmu.old_mac, neigh->ha, dev->addr_len);
+ memcpy(nmu.update_mac, lladdr, dev->addr_len);
+ /* QCA NSS ECM update - End */
+
if ((old & NUD_VALID) &&
!memcmp(lladdr, neigh->ha, dev->addr_len))
lladdr = neigh->ha;
@@ -1423,8 +1445,11 @@ out:
if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
neigh_update_gc_list(neigh);
- if (notify)
+ if (notify) {
neigh_update_notify(neigh, nlmsg_pid);
+ atomic_notifier_call_chain(&neigh_mac_update_notifier_list, 0,
+ (struct neigh_mac_update *)&nmu); /* QCA NSS ECM support */
+ }
trace_neigh_update_done(neigh, err);
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1206,6 +1206,9 @@ static bool fib_valid_key_len(u32 key, u
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *old);
+/* Define route change notification chain. */
+static BLOCKING_NOTIFIER_HEAD(iproute_chain); /* QCA NSS ECM support */
+
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
@@ -1396,6 +1399,9 @@ int fib_table_insert(struct net *net, st
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_NEWROUTE, fi);
+
return 0;
out_remove_new_fa:
@@ -1766,6 +1772,9 @@ int fib_table_delete(struct net *net, st
if (fa_to_delete->fa_state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_DELROUTE, fa_to_delete->fa_info);
+
fib_release_info(fa_to_delete->fa_info);
alias_free_mem_rcu(fa_to_delete);
return 0;
@@ -2394,6 +2403,20 @@ void __init fib_trie_init(void)
0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
}
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_register_notifier);
+
+int ip_rt_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_unregister_notifier);
+/* QCA NSS ECM support - End */
+
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
{
struct fib_table *tb;
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -987,6 +987,7 @@ void inet6_ifa_finish_destroy(struct ine
kfree_rcu(ifp, rcu);
}
+EXPORT_SYMBOL(inet6_ifa_finish_destroy);
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
@@ -2048,6 +2049,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(str
return result;
}
+EXPORT_SYMBOL(ipv6_get_ifaddr);
/* Gets referenced address, destroys ifaddr */
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -649,6 +649,7 @@ void ndisc_send_ns(struct net_device *de
ndisc_send_skb(skb, daddr, saddr);
}
+EXPORT_SYMBOL(ndisc_send_ns);
void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
const struct in6_addr *daddr)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3878,6 +3878,9 @@ out_free:
return ERR_PTR(err);
}
+/* Define route change notification chain. */
+ATOMIC_NOTIFIER_HEAD(ip6route_chain); /* QCA NSS ECM support */
+
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack)
{
@@ -3889,6 +3892,10 @@ int ip6_route_add(struct fib6_config *cf
return PTR_ERR(rt);
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_NEWROUTE, rt);
+
fib6_info_release(rt);
return err;
@@ -3910,6 +3917,9 @@ static int __ip6_del_rt(struct fib6_info
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_DELROUTE, rt);
out:
fib6_info_release(rt);
return err;
@@ -6352,6 +6362,20 @@ static int ip6_route_dev_notify(struct n
return NOTIFY_OK;
}
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_register_notifier);
+
+int rt6_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_unregister_notifier);
+/* QCA NSS ECM support - End */
+
/*
* /proc
*/
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1934,6 +1934,7 @@ static void ip6gre_tap_setup(struct net_
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags_ext |= IFF_EXT_GRE_V6_TAP; /* QCA NSS ECM Support */
netif_keep_dst(dev);
}
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1333,6 +1333,7 @@ static void ipgre_tap_setup(struct net_d
dev->netdev_ops = &gre_tap_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags_ext |= IFF_EXT_GRE_V4_TAP; /* QCA NSS ECM Support */
ip_tunnel_setup(dev, gre_tap_net_id);
}
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -31,4 +31,36 @@ void nf_ct_gre_keymap_destroy(struct nf_
bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple);
+
+/* QCA NSS ECM Support - Start */
+/* GRE is a mess: Four different standards */
+struct gre_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 rec:3,
+ srr:1,
+ seq:1,
+ key:1,
+ routing:1,
+ csum:1,
+ version:3,
+ reserved:4,
+ ack:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 csum:1,
+ routing:1,
+ key:1,
+ seq:1,
+ srr:1,
+ rec:3,
+ ack:1,
+ reserved:4,
+ version:3;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+ __be16 protocol;
+};
+/* QCA NSS ECM Support - End */
+
+
#endif /* _CONNTRACK_PROTO_GRE_H */
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -123,5 +123,6 @@ static inline void nf_ct_destroy_timeout
extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
#endif
+extern unsigned int *udp_get_timeouts(struct net *net);
#endif /* _NF_CONNTRACK_TIMEOUT_H */
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -29,10 +29,11 @@ static const unsigned int udp_timeouts[U
[UDP_CT_REPLIED] = 120*HZ,
};
-static unsigned int *udp_get_timeouts(struct net *net)
+unsigned int *udp_get_timeouts(struct net *net)
{
return nf_udp_pernet(net)->timeouts;
}
+EXPORT_SYMBOL(udp_get_timeouts);
static void udp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
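
The symbols exported by this patch are aimed at out-of-tree consumers such as qca-nss-ecm; nothing in-tree calls them. A hedged sketch of how such a module could hook two of the chains added here - the bridge FDB add/del notifier and the IPv4 route notifier - with all names below being illustrative placeholders:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/if_bridge.h>
#include <net/route.h>

/* Sketch only: example listeners on the notifier chains exported above. */
static int example_fdb_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct br_fdb_event *ev = data;

	/* event is BR_FDB_EVENT_ADD or BR_FDB_EVENT_DEL (see if_bridge.h) */
	pr_debug("fdb %s %pM on %s\n",
		 event == BR_FDB_EVENT_ADD ? "add" : "del",
		 ev->addr, ev->dev->name);
	return NOTIFY_DONE;
}

static int example_route_event(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	/* event is RTM_NEWROUTE or RTM_DELROUTE, data is a struct fib_info */
	return NOTIFY_DONE;
}

static struct notifier_block example_fdb_nb = {
	.notifier_call = example_fdb_event,
};

static struct notifier_block example_route_nb = {
	.notifier_call = example_route_event,
};

static int __init example_init(void)
{
	br_fdb_register_notify(&example_fdb_nb);
	return ip_rt_register_notifier(&example_route_nb);
}

static void __exit example_exit(void)
{
	ip_rt_unregister_notifier(&example_route_nb);
	br_fdb_unregister_notify(&example_fdb_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");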


@@ -0,0 +1,207 @@
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -113,6 +113,9 @@ struct netns_ct {
struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct atomic_notifier_head nf_conntrack_chain;
+#endif
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -78,6 +78,11 @@ struct nf_exp_event {
int report;
};
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
+#endif
+
struct nf_ct_event_notifier {
int (*ct_event)(unsigned int events, const struct nf_ct_event *item);
int (*exp_event)(unsigned int events, const struct nf_exp_event *item);
@@ -111,11 +116,13 @@ static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return;
+#endif
e = nf_ct_ecache_find(ct);
if (e == NULL)
@@ -130,10 +137,12 @@ nf_conntrack_event_report(enum ip_conntr
u32 portid, int report)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
+#endif
return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
#else
@@ -145,10 +154,12 @@ static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
+#endif
return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
#else
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -160,6 +160,21 @@ config NF_CONNTRACK_TIMEOUT
If unsure, say `N'.
+config NF_CONNTRACK_DSCPREMARK_EXT
+ bool 'Connection tracking extension for dscp remark target'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking extension
+ for dscp remark.
+
+config NF_CONNTRACK_CHAIN_EVENTS
+ bool "Register multiple callbacks to ct events"
+ depends on NF_CONNTRACK_EVENTS
+ help
+ Support multiple registrations.
+
+ If unsure, say `N'.
+
config NF_CONNTRACK_TIMESTAMP
bool 'Connection tracking timestamping'
depends on NETFILTER_ADVANCED
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2877,6 +2877,9 @@ int nf_conntrack_init_net(struct net *ne
nf_conntrack_ecache_pernet_init(net);
nf_conntrack_helper_pernet_init(net);
nf_conntrack_proto_pernet_init(net);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ATOMIC_INIT_NOTIFIER_HEAD(&net->ct.nf_conntrack_chain);
+#endif
return 0;
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -17,6 +17,9 @@
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+#include <linux/notifier.h>
+#endif
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
@@ -150,8 +153,15 @@ static int __nf_conntrack_eventmask_repo
rcu_read_unlock();
return 0;
}
-
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ events | missed, &item);
+
+ if (notify)
+ ret = notify->ct_event(events | missed, item);
+#else
ret = notify->ct_event(events | missed, item);
+#endif
rcu_read_unlock();
if (likely(ret >= 0 && missed == 0))
@@ -245,7 +255,11 @@ void nf_ct_expect_event_report(enum ip_c
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
if (!notify)
+#endif
goto out_unlock;
e = nf_ct_ecache_find(exp->master);
@@ -264,6 +278,14 @@ out_unlock:
rcu_read_unlock();
}
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
+#endif
+
void nf_conntrack_register_notifier(struct net *net,
const struct nf_ct_event_notifier *new)
{
@@ -278,6 +300,14 @@ void nf_conntrack_register_notifier(stru
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
+#endif
+
void nf_conntrack_unregister_notifier(struct net *net)
{
mutex_lock(&nf_ct_ecache_mutex);
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -28,6 +28,10 @@ enum nf_ct_ext_id {
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
NF_CT_EXT_SYNPROXY,
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ NF_CT_EXT_DSCPREMARK, /* QCA NSS ECM support */
+#endif
+
NF_CT_EXT_NUM,
};
@@ -40,6 +44,9 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
+/* QCA NSS ECM support - Start */
+#define NF_CT_EXT_DSCPREMARK_TYPE struct nf_ct_dscpremark_ext
+/* QCA NSS ECM support - End */
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -14,6 +14,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_LABEL
nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_DSCPREMARK_EXT) += nf_conntrack_dscpremark_ext.o
obj-$(CONFIG_NETFILTER) = netfilter.o
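
With CONFIG_NF_CONNTRACK_CHAIN_EVENTS enabled, several modules can observe conntrack events at once instead of competing for the single nf_conntrack_event_cb slot. A hedged sketch of a subscriber using the per-netns chain added above; the callback and module names are placeholders and only init_net is handled:

#include <linux/module.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

/* Sketch only: listening on net->ct.nf_conntrack_chain. */
static int example_ct_event(struct notifier_block *nb, unsigned long events,
			    void *ptr)
{
	struct nf_ct_event *item = ptr;

	if (events & (1 << IPCT_NEW))
		pr_debug("new conntrack %p\n", item->ct);
	if (events & (1 << IPCT_DESTROY))
		pr_debug("conntrack %p destroyed\n", item->ct);
	return NOTIFY_DONE;
}

static struct notifier_block example_ct_nb = {
	.notifier_call = example_ct_event,
};

static int __init example_init(void)
{
	return nf_conntrack_register_chain_notifier(&init_net, &example_ct_nb);
}

static void __exit example_exit(void)
{
	nf_conntrack_unregister_chain_notifier(&init_net, &example_ct_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");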


@@ -0,0 +1,577 @@
From ac4b71aecf237fd07a29788706d198b4e36fa660 Mon Sep 17 00:00:00 2001
From: Simon Casey <simon501098c@gmail.com>
Date: Wed, 2 Feb 2022 19:32:54 +0100
Subject: [PATCH] Update 602-qca-add-pppoe-offload-support.patch for kernel
5.15
---
drivers/net/ppp/ppp_generic.c | 276 +++++++++++++++++++++++++++++++++-
drivers/net/ppp/pppoe.c | 82 +++++++++-
include/linux/if_pppox.h | 13 ++
include/linux/netdevice.h | 19 +++
include/linux/ppp_channel.h | 63 +++++++-
5 files changed, 445 insertions(+), 8 deletions(-)
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -48,6 +48,7 @@
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/if_pppox.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
@@ -253,6 +254,25 @@ struct ppp_net {
#define seq_before(a, b) ((s32)((a) - (b)) < 0)
#define seq_after(a, b) ((s32)((a) - (b)) > 0)
+
+/*
+ * Registration/Unregistration methods
+ * for PPP channel connect and disconnect event notifications.
+ */
+RAW_NOTIFIER_HEAD(ppp_channel_connection_notifier_list);
+
+void ppp_channel_connection_register_notify(struct notifier_block *nb)
+{
+ raw_notifier_chain_register(&ppp_channel_connection_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(ppp_channel_connection_register_notify);
+
+void ppp_channel_connection_unregister_notify(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&ppp_channel_connection_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(ppp_channel_connection_unregister_notify);
+
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
@@ -3450,7 +3470,10 @@ ppp_connect_channel(struct channel *pch,
struct ppp_net *pn;
int ret = -ENXIO;
int hdrlen;
+ int ppp_proto;
+ int version;
+ int notify = 0;
pn = ppp_pernet(pch->chan_net);
mutex_lock(&pn->all_ppp_mutex);
@@ -3482,13 +3505,40 @@ ppp_connect_channel(struct channel *pch,
++ppp->n_channels;
pch->ppp = ppp;
refcount_inc(&ppp->file.refcnt);
+
+ /* Set the netdev priv flag if the protocol
+ * is L2TP or PPTP. Return success in all cases.
+ */
+ if (!pch->chan)
+ goto out2;
+
+ ppp_proto = ppp_channel_get_protocol(pch->chan);
+ if (ppp_proto == PX_PROTO_PPTP) {
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_PPTP;
+ } else if (ppp_proto == PX_PROTO_OL2TP) {
+ version = ppp_channel_get_proto_version(pch->chan);
+ if (version == 2)
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV2;
+ else if (version == 3)
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV3;
+ }
+ notify = 1;
+
+ out2:
ppp_unlock(ppp);
ret = 0;
-
outl:
write_unlock_bh(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
+
+ if (notify && ppp && ppp->dev) {
+ dev_hold(ppp->dev);
+ raw_notifier_call_chain(&ppp_channel_connection_notifier_list,
+ PPP_CHANNEL_CONNECT, ppp->dev);
+ dev_put(ppp->dev);
+ }
+
return ret;
}
@@ -3506,6 +3556,13 @@ ppp_disconnect_channel(struct channel *p
pch->ppp = NULL;
write_unlock_bh(&pch->upl);
if (ppp) {
+ if (ppp->dev) {
+ dev_hold(ppp->dev);
+ raw_notifier_call_chain(&ppp_channel_connection_notifier_list,
+ PPP_CHANNEL_DISCONNECT, ppp->dev);
+ dev_put(ppp->dev);
+ }
+
/* remove it from the ppp unit's list */
ppp_lock(ppp);
list_del(&pch->clist);
@@ -3585,6 +3642,222 @@ static void *unit_find(struct idr *p, in
return idr_find(p, n);
}
+/* Updates the PPP interface statistics. */
+void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped)
+{
+ struct ppp *ppp;
+
+ if (!dev)
+ return;
+
+ if (dev->type != ARPHRD_PPP)
+ return;
+
+ ppp = netdev_priv(dev);
+
+ ppp_xmit_lock(ppp);
+ ppp->stats64.tx_packets += tx_packets;
+ ppp->stats64.tx_bytes += tx_bytes;
+ ppp->dev->stats.tx_errors += tx_errors;
+ ppp->dev->stats.tx_dropped += tx_dropped;
+ if (tx_packets)
+ ppp->last_xmit = jiffies;
+ ppp_xmit_unlock(ppp);
+
+ ppp_recv_lock(ppp);
+ ppp->stats64.rx_packets += rx_packets;
+ ppp->stats64.rx_bytes += rx_bytes;
+ ppp->dev->stats.rx_errors += rx_errors;
+ ppp->dev->stats.rx_dropped += rx_dropped;
+ if (rx_packets)
+ ppp->last_recv = jiffies;
+ ppp_recv_unlock(ppp);
+}
+
+/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
+ * the device is not PPP.
+ */
+int ppp_is_multilink(struct net_device *dev)
+{
+ struct ppp *ppp;
+ unsigned int flags;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+ ppp_lock(ppp);
+ flags = ppp->flags;
+ ppp_unlock(ppp);
+
+ if (flags & SC_MULTILINK)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(ppp_is_multilink);
+
+/* ppp_channel_get_protocol()
+ * Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ *
+ * NOTE: Some channels do not use PX sockets so the protocol value may be very
+ * different for them.
+ * NOTE: -1 indicates failure.
+ * NOTE: Once you know the channel protocol you may then either cast 'chan' to
+ * its sub-class or use the channel protocol specific API's as provided by that
+ * channel sub type.
+ */
+int ppp_channel_get_protocol(struct ppp_channel *chan)
+{
+ if (!chan->ops->get_channel_protocol)
+ return -1;
+
+ return chan->ops->get_channel_protocol(chan);
+}
+EXPORT_SYMBOL(ppp_channel_get_protocol);
+
+/* ppp_channel_get_proto_version()
+ * Call this to get channel protocol version
+ */
+int ppp_channel_get_proto_version(struct ppp_channel *chan)
+{
+ if (!chan->ops->get_channel_protocol_ver)
+ return -1;
+
+ return chan->ops->get_channel_protocol_ver(chan);
+}
+EXPORT_SYMBOL(ppp_channel_get_proto_version);
+
+/* ppp_channel_hold()
+ * Call this to hold a channel.
+ *
+ * Returns true on success or false if the hold could not happen.
+ *
+ * NOTE: chan must be protected against destruction during this call -
+ * either by correct locking etc. or because you already have an implicit
+ * or explicit hold to the channel already and this is an additional hold.
+ */
+bool ppp_channel_hold(struct ppp_channel *chan)
+{
+ if (!chan->ops->hold)
+ return false;
+
+ chan->ops->hold(chan);
+ return true;
+}
+EXPORT_SYMBOL(ppp_channel_hold);
+
+/* ppp_channel_release()
+ * Call this to release a hold you have upon a channel
+ */
+void ppp_channel_release(struct ppp_channel *chan)
+{
+ chan->ops->release(chan);
+}
+EXPORT_SYMBOL(ppp_channel_release);
+
+/* Check if ppp xmit lock is on hold */
+bool ppp_is_xmit_locked(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ if (!dev)
+ return false;
+
+ if (dev->type != ARPHRD_PPP)
+ return false;
+
+ ppp = netdev_priv(dev);
+ if (!ppp)
+ return false;
+
+ if (spin_is_locked(&(ppp)->wlock))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ppp_is_xmit_locked);
+
+/* ppp_hold_channels()
+ * Returns the PPP channels of the PPP device, storing each one into
+ * channels[].
+ *
+ * channels[] has chan_sz elements.
+ * This function returns the number of channels stored, up to chan_sz.
+ * It will return < 0 if the device is not PPP.
+ *
+ * You MUST release the channels using ppp_release_channels().
+ */
+int ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[],
+ unsigned int chan_sz)
+{
+ struct ppp *ppp;
+ int c;
+ struct channel *pch;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+
+ c = 0;
+ ppp_lock(ppp);
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ struct ppp_channel *chan;
+
+ if (!pch->chan) {
+ /* Channel is going / gone away */
+ continue;
+ }
+
+ if (c == chan_sz) {
+ /* No space to record channel */
+ ppp_unlock(ppp);
+ return c;
+ }
+
+ /* Hold the channel, if supported */
+ chan = pch->chan;
+ if (!chan->ops->hold)
+ continue;
+
+ chan->ops->hold(chan);
+
+ /* Record the channel */
+ channels[c++] = chan;
+ }
+ ppp_unlock(ppp);
+ return c;
+}
+EXPORT_SYMBOL(ppp_hold_channels);
+
+/* ppp_release_channels()
+ * Releases channels
+ */
+void ppp_release_channels(struct ppp_channel *channels[], unsigned int chan_sz)
+{
+ unsigned int c;
+
+ for (c = 0; c < chan_sz; ++c) {
+ struct ppp_channel *chan;
+
+ chan = channels[c];
+ chan->ops->release(chan);
+ }
+}
+EXPORT_SYMBOL(ppp_release_channels);
+
/* Module/initialization stuff */
module_init(ppp_init);
@@ -3601,6 +3874,7 @@ EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+EXPORT_SYMBOL(ppp_update_stats);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -62,6 +62,7 @@
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
@@ -87,7 +88,7 @@
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
static const struct proto_ops pppoe_ops;
-static const struct ppp_channel_ops pppoe_chan_ops;
+static const struct pppoe_channel_ops pppoe_chan_ops;
/* per-net private data for this module */
static unsigned int pppoe_net_id __read_mostly;
@@ -692,7 +693,7 @@ static int pppoe_connect(struct socket *
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
- po->chan.ops = &pppoe_chan_ops;
+ po->chan.ops = (struct ppp_channel_ops *)&pppoe_chan_ops;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error) {
@@ -995,9 +996,80 @@ static int pppoe_fill_forward_path(struc
return 0;
}
-static const struct ppp_channel_ops pppoe_chan_ops = {
- .start_xmit = pppoe_xmit,
- .fill_forward_path = pppoe_fill_forward_path,
+/************************************************************************
+ *
+ * function called by generic PPP driver to hold channel
+ *
+ ***********************************************************************/
+static void pppoe_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/************************************************************************
+ *
+ * function called by generic PPP driver to release channel
+ *
+ ***********************************************************************/
+static void pppoe_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/************************************************************************
+ *
+ * function called to get the channel protocol type
+ *
+ ***********************************************************************/
+static int pppoe_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_OE;
+}
+
+/************************************************************************
+ *
+ * function called to get the PPPoE channel addressing
+ * NOTE: This function returns a HOLD to the netdevice
+ *
+ ***********************************************************************/
+static int pppoe_get_addressing(struct ppp_channel *chan,
+ struct pppoe_opt *addressing)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ int err = 0;
+
+ *addressing = po->proto.pppoe;
+ if (!addressing->dev)
+ return -ENODEV;
+
+ dev_hold(addressing->dev);
+ return err;
+}
+
+/* pppoe_channel_addressing_get()
+ * Return PPPoE channel specific addressing information.
+ */
+int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing)
+{
+ return pppoe_get_addressing(chan, addressing);
+}
+EXPORT_SYMBOL(pppoe_channel_addressing_get);
+
+static const struct pppoe_channel_ops pppoe_chan_ops = {
+ /* PPPoE specific channel ops */
+ .get_addressing = pppoe_get_addressing,
+ /* General ppp channel ops */
+ .ops.start_xmit = pppoe_xmit,
+ .ops.get_channel_protocol = pppoe_get_channel_protocol,
+ .ops.hold = pppoe_hold_chan,
+ .ops.release = pppoe_release_chan,
+ .ops.fill_forward_path = pppoe_fill_forward_path,
};
static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -93,4 +93,17 @@ enum {
PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
};
+/*
+ * PPPoE Channel specific operations
+ */
+struct pppoe_channel_ops {
+ /* Must be first - general to all PPP channels */
+ struct ppp_channel_ops ops;
+ int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
+};
+
+/* Return PPPoE channel specific addressing information */
+extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing);
+
#endif /* !(__LINUX_IF_PPPOX_H) */
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2029,6 +2029,7 @@ struct net_device {
unsigned int flags;
unsigned int priv_flags;
unsigned int extra_priv_flags;
+ unsigned int priv_flags_ext;
const struct net_device_ops *netdev_ops;
int ifindex;
unsigned short gflags;
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -19,6 +19,10 @@
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <net/net_namespace.h>
+#include <linux/notifier.h>
+
+#define PPP_CHANNEL_DISCONNECT 0
+#define PPP_CHANNEL_CONNECT 1
struct ppp_channel;
@@ -28,9 +32,19 @@ struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
/* Handle an ioctl call that has come in via /dev/ppp. */
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+ /* Get channel protocol type, one of PX_PROTO_XYZ or specific to
+ * the channel subtype
+ */
+ int (*get_channel_protocol)(struct ppp_channel *);
+ /* Get channel protocol version */
+ int (*get_channel_protocol_ver)(struct ppp_channel *);
+ /* Hold the channel from being destroyed */
+ void (*hold)(struct ppp_channel *);
+ /* Release hold on the channel */
+ void (*release)(struct ppp_channel *);
int (*fill_forward_path)(struct net_device_path_ctx *,
- struct net_device_path *,
- const struct ppp_channel *);
+ struct net_device_path *,
+ const struct ppp_channel *);
};
struct ppp_channel {
@@ -74,6 +88,51 @@ extern int ppp_unit_number(struct ppp_ch
/* Get the device name associated with a channel, or NULL if none */
extern char *ppp_dev_name(struct ppp_channel *);
+/* Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ */
+extern int ppp_channel_get_protocol(struct ppp_channel *);
+
+/* Call this get protocol version */
+extern int ppp_channel_get_proto_version(struct ppp_channel *);
+
+/* Call this to hold a channel */
+extern bool ppp_channel_hold(struct ppp_channel *);
+
+/* Call this to release a hold you have upon a channel */
+extern void ppp_channel_release(struct ppp_channel *);
+
+/* Release hold on PPP channels */
+extern void ppp_release_channels(struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Hold PPP channels for the PPP device */
+extern int ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if ppp xmit lock is locked */
+extern bool ppp_is_xmit_locked(struct net_device *dev);
+
+/* Test if the ppp device is a multi-link ppp device */
+extern int ppp_is_multilink(struct net_device *dev);
+
+/* Register the PPP channel connect notifier */
+extern void ppp_channel_connection_register_notify(struct notifier_block *nb);
+
+/* Unregister the PPP channel connect notifier */
+extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb);
+
+/* Update the statistics of the PPP net_device, incrementing each
+ * field by the corresponding parameter value
+ */
+extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped);
+
+
/*
* SMP locking notes:
* The channel code must ensure that when it calls ppp_unregister_channel,
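
Taken together, the new channel helpers let an offload engine walk from a PPP net_device down to the underlying PPPoE session. A hedged sketch of that lookup, single-link case only; the function name is a placeholder:

#include <linux/if_pppox.h>
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>

/* Sketch only: resolving PPPoE addressing from a ppp netdevice. */
static void example_inspect_ppp(struct net_device *ppp_dev)
{
	struct ppp_channel *channels[1];
	struct pppoe_opt addressing;

	if (ppp_is_multilink(ppp_dev) != 0)
		return;		/* multilink (or non-PPP device) not handled here */

	if (ppp_hold_channels(ppp_dev, channels, 1) != 1)
		return;

	if (ppp_channel_get_protocol(channels[0]) == PX_PROTO_OE &&
	    !pppoe_channel_addressing_get(channels[0], &addressing)) {
		pr_debug("PPPoE session 0x%x on %s\n",
			 ntohs(addressing.pa.sid), addressing.dev->name);
		dev_put(addressing.dev);	/* addressing_get returned a hold */
	}

	ppp_release_channels(channels, 1);
}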


@@ -0,0 +1,95 @@
From 3c17a0e1112be70071e98d5208da5b55dcec20a6 Mon Sep 17 00:00:00 2001
From: Simon Casey <simon501098c@gmail.com>
Date: Wed, 2 Feb 2022 19:37:29 +0100
Subject: [PATCH] Update 605-qca-add-add-nss-bridge-mgr-support.patch for
kernel 5.15
---
include/linux/if_bridge.h | 4 ++++
net/bridge/br_fdb.c | 25 +++++++++++++++++++++----
2 files changed, 25 insertions(+), 4 deletions(-)
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -231,4 +231,8 @@ typedef struct net_bridge_port *br_get_d
extern br_get_dst_hook_t __rcu *br_get_dst_hook;
/* QCA NSS ECM support - End */
+/* QCA NSS bridge-mgr support - Start */
+extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br);
+/* QCA NSS bridge-mgr support - End */
+
#endif
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -67,6 +67,15 @@ void br_fdb_update_unregister_notify(str
EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify);
/* QCA NSS ECM support - End */
+/* QCA NSS bridge-mgr support - Start */
+struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br)
+{
+ dev_hold(br->dev);
+ return br->dev;
+}
+EXPORT_SYMBOL_GPL(br_fdb_bridge_dev_get_and_hold);
+/* QCA NSS bridge-mgr support - End */
+
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
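
For illustration (not part of the patch): br_fdb_bridge_dev_get_and_hold() simply takes a dev_hold() on the bridge's net_device and returns it, so every caller owns a reference it must drop. A minimal, hypothetical usage pattern:

#include <linux/kernel.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>

/* "br" must be a valid struct net_bridge pointer (e.g. from a bridge
 * port); the struct itself is private to the bridge code, so real
 * callers sit close to it.
 */
static void example_log_bridge_dev(struct net_bridge *br)
{
	struct net_device *br_dev;

	/* The helper takes a dev_hold() for us, so we own a reference */
	br_dev = br_fdb_bridge_dev_get_and_hold(br);
	pr_info("bridge device: %s\n", br_dev->name);
	dev_put(br_dev);	/* balance the hold once done */
}
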
@@ -372,7 +381,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
- u8 mac_addr[6]; /* QCA NSS ECM support */
+ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -400,12 +409,13 @@ void br_fdb_cleanup(struct work_struct *
} else {
spin_lock_bh(&br->hash_lock);
if (!hlist_unhashed(&f->fdb_node)) {
- ether_addr_copy(mac_addr, f->key.addr.addr);
+ memset(&fdb_event, 0, sizeof(fdb_event));
+ ether_addr_copy(fdb_event.addr, f->key.addr.addr);
fdb_delete(br, f, true);
/* QCA NSS ECM support - Start */
atomic_notifier_call_chain(
&br_fdb_update_notifier_list, 0,
- (void *)mac_addr);
+ (void *)&fdb_event);
/* QCA NSS ECM support - End */
}
spin_unlock_bh(&br->hash_lock);
@@ -623,6 +633,7 @@ void br_fdb_update(struct net_bridge *br
const unsigned char *addr, u16 vid, unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
+ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */
/* some users want to always flood. */
if (hold_time(br) == 0)
@@ -648,6 +659,12 @@ void br_fdb_update(struct net_bridge *br
if (unlikely(source != READ_ONCE(fdb->dst) &&
!test_bit(BR_FDB_STICKY, &fdb->flags))) {
br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
+ /* QCA NSS bridge-mgr support - Start */
+ ether_addr_copy(fdb_event.addr, addr);
+ fdb_event.br = br;
+ fdb_event.orig_dev = READ_ONCE(fdb->dst->dev);
+ fdb_event.dev = source->dev;
+ /* QCA NSS bridge-mgr support - End */
WRITE_ONCE(fdb->dst, source);
fdb_modified = true;
/* Take over HW learned entry */
@@ -659,7 +676,7 @@ void br_fdb_update(struct net_bridge *br
/* QCA NSS ECM support - Start */
atomic_notifier_call_chain(
&br_fdb_update_notifier_list,
- 0, (void *)addr);
+ 0, (void *)&fdb_event);
/* QCA NSS ECM support - End */
}
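
With the notifier payload widened from a bare MAC address to a struct br_fdb_event, a listener (for example a bridge manager syncing a hardware FDB) also receives the bridge and port context. A hedged sketch of such a callback, assuming the br_fdb_event layout implied by the assignments above and the register/unregister helpers added earlier in this series:

#include <linux/kernel.h>
#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>

static int example_fdb_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct br_fdb_event *event = data;

	/* The cleanup path above only fills in the MAC address, so the
	 * device fields may be NULL there.
	 */
	pr_info("FDB update for %pM (new port: %s)\n", event->addr,
		event->dev ? event->dev->name : "none");

	return NOTIFY_DONE;
}

static struct notifier_block example_fdb_nb = {
	.notifier_call = example_fdb_event,
};

/* Attached via the ECM helpers added earlier in the series:
 *   br_fdb_update_register_notify(&example_fdb_nb);
 *   br_fdb_update_unregister_notify(&example_fdb_nb);
 */
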


@@ -0,0 +1,81 @@
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -249,6 +249,9 @@ static const struct flow_dissector_key f
},
};
+/* QCA NSS bonding support */
+static unsigned long bond_id_mask = 0xFFFFFFF0;
+
static struct flow_dissector flow_keys_bonding __read_mostly;
/*-------------------------- Forward declarations ---------------------------*/
@@ -4060,6 +4063,23 @@ static int bond_get_lowest_level_rcu(str
}
#endif
+/* QCA NSS bonding support */
+int bond_get_id(struct net_device *bond_dev)
+{
+ struct bonding *bond;
+ int bond_id = 0;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return -EINVAL;
+
+ bond = netdev_priv(bond_dev);
+ bond_id = bond->id;
+
+ return bond_id;
+}
+EXPORT_SYMBOL(bond_get_id);
+
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -5392,6 +5412,10 @@ static void bond_destructor(struct net_d
if (bond->rr_tx_counter)
free_percpu(bond->rr_tx_counter);
+
+ /* QCA NSS bonding support */
+ if (bond->id != (~0U))
+ clear_bit(bond->id, &bond_id_mask);
}
void bond_setup(struct net_device *bond_dev)
@@ -5969,7 +5993,14 @@ int bond_create(struct net *net, const c
bond_work_init_all(bond);
- rtnl_unlock();
+ /* QCA NSS bonding support */
+ bond->id = ~0U;
+ if (bond_id_mask != (~0UL)) {
+ bond->id = (u32)ffz(bond_id_mask);
+ set_bit(bond->id, &bond_id_mask);
+ }
+
+ rtnl_unlock();
return 0;
}
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -255,6 +255,7 @@ struct bonding {
spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
struct bpf_prog *xdp_prog;
+ u32 id; /* QCA NSS bonding */
};
#define bond_slave_get_rcu(dev) \
@@ -629,6 +630,7 @@ struct bond_net {
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+int bond_get_id(struct net_device *bond_dev); /* QCA NSS bonding support */
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
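
For illustration (not part of the patch): bond_get_id() gives offload code a small, stable integer per bond device, allocated from the free bits of bond_id_mask at creation time (on a 32-bit target the initial mask 0xFFFFFFF0 leaves only ids 0-3 free), with ~0U meaning no id was allocated. A hypothetical caller mapping a bond to a hardware virtual interface might use it like this:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/bonding.h>

static int example_map_bond(struct net_device *bond_dev)
{
	int id = bond_get_id(bond_dev);

	/* Negative means bond_dev is not a bonding master; the "no id
	 * allocated" marker (~0U) also comes back negative after the
	 * implicit cast to int.
	 */
	if (id < 0)
		return -ENODEV;

	pr_info("%s uses bond id %d\n", bond_dev->name, id);
	return id;
}
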


@@ -0,0 +1,96 @@
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -15,6 +15,13 @@ struct macvlan_port;
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
+/* QCA NSS ECM Support - Start */
+/*
+ * Callback for updating interface statistics for macvlan flows offloaded from host CPU.
+ */
+typedef void (*macvlan_offload_stats_update_cb_t)(struct net_device *dev, struct rtnl_link_stats64 *stats, bool update_mcast_rx_stats);
+/* QCA NSS ECM Support - End */
+
struct macvlan_dev {
struct net_device *dev;
struct list_head list;
@@ -34,6 +41,7 @@ struct macvlan_dev {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
+ macvlan_offload_stats_update_cb_t offload_stats_update; /* QCA NSS ECM support */
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
@@ -106,4 +114,26 @@ static inline int macvlan_release_l2fw_o
macvlan->accel_priv = NULL;
return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
}
+
+/* QCA NSS ECM Support - Start */
+#if IS_ENABLED(CONFIG_MACVLAN)
+static inline void
+macvlan_offload_stats_update(struct net_device *dev,
+ struct rtnl_link_stats64 *stats,
+ bool update_mcast_rx_stats)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ macvlan->offload_stats_update(dev, stats, update_mcast_rx_stats);
+}
+
+static inline enum
+macvlan_mode macvlan_get_mode(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode;
+}
+#endif
+/* QCA NSS ECM Support - End */
#endif /* _LINUX_IF_MACVLAN_H */
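
The callback hook and the macvlan_offload_stats_update() wrapper let an offload engine push the byte/packet counters it accumulated for a hardware-accelerated flow back into the macvlan device's per-CPU stats. A hedged sketch of the caller side (the function name and counter sources are invented for illustration):

#include <linux/kernel.h>
#include <linux/if_macvlan.h>
#include <linux/netdevice.h>

/* "dev" must be a macvlan upper device: the inline wrapper above
 * dereferences the macvlan private area unconditionally, even though
 * the default callback (macvlan_dev_update_stats() below) re-checks
 * netif_is_macvlan().
 */
static void example_sync_macvlan_stats(struct net_device *dev,
				       u64 rx_pkts, u64 rx_bytes,
				       u64 tx_pkts, u64 tx_bytes,
				       bool mcast_rx)
{
	struct rtnl_link_stats64 stats = {
		.rx_packets = rx_pkts,
		.rx_bytes   = rx_bytes,
		.tx_packets = tx_pkts,
		.tx_bytes   = tx_bytes,
	};

	macvlan_offload_stats_update(dev, &stats, mcast_rx);
}
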
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -930,6 +930,34 @@ static void macvlan_uninit(struct net_de
macvlan_port_destroy(port->dev);
}
+/* QCA NSS ECM Support - Start */
+/* Update macvlan statistics processed by offload engines */
+static void macvlan_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *offl_stats,
+ bool update_mcast_rx_stats)
+{
+ struct vlan_pcpu_stats *stats;
+ struct macvlan_dev *macvlan;
+
+ /* Is this a macvlan? */
+ if (!netif_is_macvlan(dev))
+ return;
+
+ macvlan = netdev_priv(dev);
+ stats = this_cpu_ptr(macvlan->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += offl_stats->rx_packets;
+ stats->rx_bytes += offl_stats->rx_bytes;
+ stats->tx_packets += offl_stats->tx_packets;
+ stats->tx_bytes += offl_stats->tx_bytes;
+ /* Update multicast statistics */
+ if (unlikely(update_mcast_rx_stats)) {
+ stats->rx_multicast += offl_stats->rx_packets;
+ }
+ u64_stats_update_end(&stats->syncp);
+}
+/* QCA NSS ECM Support - End */
+
static void macvlan_dev_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -1465,6 +1493,7 @@ int macvlan_common_newlink(struct net *s
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
+ vlan->offload_stats_update = macvlan_dev_update_stats; /* QCA NSS ECM Support */
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])