ipq806x: NSS Hardware Offloading Shortcut SFE Patches
@@ -0,0 +1,144 @@
From 733a75729c1fbb478caaed875dd9c09a878a553d Mon Sep 17 00:00:00 2001
From: Robert Marko <robimarko@gmail.com>
Date: Fri, 5 Jun 2020 11:44:27 +0200
Subject: [PATCH] Revert "ARM: dma-mapping: remove dmac_clean_range and
 dmac_inv_range"

This partially reverts commit 702b94bff3c505 ("ARM: dma-mapping:
remove dmac_clean_range and dmac_inv_range").

Some MSM drivers still use the dmac_clean_range and dmac_inv_range APIs.
Bring back the defines and exports for v7 CPUs.

Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
Signed-off-by: Abhimanyu Kapur <abhimany@codeaurora.org>
[sramana: resolved minor merge conflicts]
Signed-off-by: Srinivas Ramana <sramana@codeaurora.org>
(cherry picked from commit d6118c0a9f7ab2b131ca36dd3dbd5634603d14fe)

Change-Id: Ib2ddb4452711c5c2013bf29f0b5d8a3572b10357
Signed-off-by: Manoharan Vijaya Raghavan <mraghava@codeaurora.org>

Signed-off-by: Robert Marko <robimarko@gmail.com>
---
 arch/arm/include/asm/cacheflush.h | 21 +++++++++++++++++++++
 arch/arm/include/asm/glue-cache.h |  2 ++
 arch/arm/mm/cache-v7.S            |  6 ++++--
 arch/arm/mm/proc-macros.S         |  2 ++
 arch/arm/mm/proc-syms.c           |  3 +++
 5 files changed, 32 insertions(+), 2 deletions(-)

--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -91,6 +91,21 @@
 * DMA Cache Coherency
 * ===================
 *
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
@@ -112,6 +127,8 @@ struct cpu_cache_fns {
 void (*dma_map_area)(const void *, size_t, int);
 void (*dma_unmap_area)(const void *, size_t, int);

+ void (*dma_inv_range)(const void *, const void *);
+ void (*dma_clean_range)(const void *, const void *);
 void (*dma_flush_range)(const void *, const void *);
 } __no_randomize_layout;

@@ -137,6 +154,8 @@ extern struct cpu_cache_fns cpu_cache;
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
+#define dmac_inv_range cpu_cache.dma_inv_range
+#define dmac_clean_range cpu_cache.dma_clean_range
 #define dmac_flush_range cpu_cache.dma_flush_range

 #else
@@ -156,6 +175,8 @@ extern void __cpuc_flush_dcache_area(voi
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
 extern void dmac_flush_range(const void *, const void *);

 #endif
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -156,6 +156,8 @@ static inline void nop_dma_unmap_area(co
 #define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)

 #define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+#define dmac_inv_range __glue(_CACHE, _dma_inv_range)
+#define dmac_clean_range __glue(_CACHE, _dma_clean_range)
 #endif

 #endif
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -363,7 +363,7 @@ ENDPROC(v7_flush_kern_dcache_area)
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
-v7_dma_inv_range:
+ENTRY(v7_dma_inv_range)
 dcache_line_size r2, r3
 sub r3, r2, #1
 tst r0, r3
@@ -393,7 +393,7 @@ ENDPROC(v7_dma_inv_range)
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
-v7_dma_clean_range:
+ENTRY(v7_dma_clean_range)
 dcache_line_size r2, r3
 sub r3, r2, #1
 bic r0, r0, r3
@@ -479,6 +479,8 @@ ENDPROC(v7_dma_unmap_area)

 globl_equ b15_dma_map_area, v7_dma_map_area
 globl_equ b15_dma_unmap_area, v7_dma_unmap_area
+ globl_equ b15_dma_inv_range, v7_dma_inv_range
+ globl_equ b15_dma_clean_range, v7_dma_clean_range
 globl_equ b15_dma_flush_range, v7_dma_flush_range

 define_cache_functions b15
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -335,6 +335,8 @@ ENTRY(\name\()_cache_fns)
 .long \name\()_flush_kern_dcache_area
 .long \name\()_dma_map_area
 .long \name\()_dma_unmap_area
+ .long \name\()_dma_inv_range
+ .long \name\()_dma_clean_range
 .long \name\()_dma_flush_range
 .size \name\()_cache_fns, . - \name\()_cache_fns
 .endm
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,6 +27,9 @@ EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
 EXPORT_SYMBOL(__cpuc_flush_dcache_area);
+EXPORT_SYMBOL(dmac_inv_range);
+EXPORT_SYMBOL(dmac_clean_range);
+EXPORT_SYMBOL(dmac_flush_range);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
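The revert above only restores plumbing; nothing in mainline calls these helpers. For orientation, this is roughly how an out-of-tree NSS/MSM driver is expected to use the re-exported routines around a buffer that the hardware reads or writes directly. A minimal sketch, assuming a hypothetical driver-owned descriptor desc of len bytes; only dmac_clean_range() and dmac_inv_range() come from the patch, everything else is illustrative:

/* Illustrative use of the re-exported ARMv7 cache maintenance helpers.
 * Not part of the patch; in-tree code would normally go through the DMA
 * mapping API (dma_map_single() and friends) instead.
 */
#include <linux/types.h>
#include <asm/cacheflush.h>

static void example_push_desc_to_hw(void *desc, size_t len)
{
	/* CPU filled the descriptor: write the cache lines back so the
	 * device sees up-to-date memory.
	 */
	dmac_clean_range(desc, (char *)desc + len);
}

static void example_pull_desc_from_hw(void *desc, size_t len)
{
	/* Device wrote the descriptor: discard stale CPU cache lines
	 * before reading it.
	 */
	dmac_inv_range(desc, (char *)desc + len);
}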
target/linux/ipq806x/patches-5.4/999-01a-nss_sfe-support.patch (new file, 263 lines)
@@ -0,0 +1,263 @@
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -17,6 +17,7 @@ struct timer_list {
 unsigned long expires;
 void (*function)(struct timer_list *);
 u32 flags;
+ unsigned long cust_data;

 #ifdef CONFIG_LOCKDEP
 struct lockdep_map lockdep_map;
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -51,6 +51,8 @@
 #define BR_DEFAULT_AGEING_TIME (300 * HZ)

 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+extern void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats);

 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
 int br_multicast_list_adjacent(struct net_device *dev,
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -112,6 +112,9 @@ struct netns_ct {

 struct ct_pcpu __percpu *pcpu_lists;
 struct ip_conntrack_stat __percpu *stat;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct atomic_notifier_head nf_conntrack_chain;
+#endif
 struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
 struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
 struct nf_ip_net nf_ct_proto;
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -72,6 +72,11 @@ struct nf_ct_event {
 int report;
 };

+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
+#endif
+
 struct nf_ct_event_notifier {
 int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
@@ -105,11 +110,13 @@ static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct net *net = nf_ct_net(ct);
 struct nf_conntrack_ecache *e;
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct net *net = nf_ct_net(ct);

 if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
 return;
+#endif

 e = nf_ct_ecache_find(ct);
 if (e == NULL)
@@ -124,10 +131,12 @@ nf_conntrack_event_report(enum ip_conntr
 u32 portid, int report)
 {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
 const struct net *net = nf_ct_net(ct);

 if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
 return 0;
+#endif

 return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
 #else
@@ -139,10 +148,12 @@ static inline int
 nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
 {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
 const struct net *net = nf_ct_net(ct);

 if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
 return 0;
+#endif

 return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
 #else
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -33,11 +33,13 @@

 /* Do not check the TCP window for incoming packets */
-static int nf_ct_tcp_no_window_check __read_mostly = 1;
+int nf_ct_tcp_no_window_check __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);

 /* "Be conservative in what you do,
 be liberal in what you accept from others."
 If it's non-zero, we mark only out of window RST segments as INVALID. */
-static int nf_ct_tcp_be_liberal __read_mostly = 0;
+int nf_ct_tcp_be_liberal __read_mostly = 0;
+EXPORT_SYMBOL_GPL(nf_ct_tcp_be_liberal);

 /* If it is set to zero, we disable picking up already established
 connections. */
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -757,3 +757,26 @@
 return p->flags & flag;
 }
 EXPORT_SYMBOL_GPL(br_port_flag_is_set);
+
+/* Update bridge statistics for bridge packets processed by offload engines */
+void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct net_bridge *br;
+ struct pcpu_sw_netstats *stats;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return;
+
+ br = netdev_priv(dev);
+ stats = this_cpu_ptr(br->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += nlstats->rx_packets;
+ stats->rx_bytes += nlstats->rx_bytes;
+ stats->tx_packets += nlstats->tx_packets;
+ stats->tx_bytes += nlstats->tx_bytes;
+ u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -158,6 +158,14 @@

 If unsure, say `N'.

+config NF_CONNTRACK_CHAIN_EVENTS
+ bool "Register multiple callbacks to ct events"
+ depends on NF_CONNTRACK_EVENTS
+ help
+ Support multiple registrations.
+
+ If unsure, say `N'.
+
 config NF_CONNTRACK_TIMESTAMP
 bool 'Connection tracking timestamping'
 depends on NETFILTER_ADVANCED
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2588,6 +2588,9 @@ int nf_conntrack_init_net(struct net *ne
 nf_conntrack_ecache_pernet_init(net);
 nf_conntrack_helper_pernet_init(net);
 nf_conntrack_proto_pernet_init(net);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ATOMIC_INIT_NOTIFIER_HEAD(&net->ct.nf_conntrack_chain);
+#endif

 return 0;

--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -17,6 +17,9 @@
 #include <linux/stddef.h>
 #include <linux/err.h>
 #include <linux/percpu.h>
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+#include <linux/notifier.h>
+#endif
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/slab.h>
@@ -127,7 +130,11 @@ int nf_conntrack_eventmask_report(unsign

 rcu_read_lock();
 notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
 if (!notify)
+#endif
 goto out_unlock;

 e = nf_ct_ecache_find(ct);
@@ -146,7 +153,15 @@ int nf_conntrack_eventmask_report(unsign
 if (!((eventmask | missed) & e->ctmask))
 goto out_unlock;

+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ eventmask | missed, &item);
+
+ if (notify)
+ ret = notify->fcn(eventmask | missed, &item);
+#else
 ret = notify->fcn(eventmask | missed, &item);
+#endif
 if (unlikely(ret < 0 || missed)) {
 spin_lock_bh(&ct->lock);
 if (ret < 0) {
@@ -186,7 +201,11 @@ void nf_ct_deliver_cached_events(struct

 rcu_read_lock();
 notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if ((notify == NULL) && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
 if (notify == NULL)
+#endif
 goto out_unlock;

 e = nf_ct_ecache_find(ct);
@@ -210,7 +229,16 @@ void nf_ct_deliver_cached_events(struct
 item.portid = 0;
 item.report = 0;

+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ events | missed,
+ &item);
+
+ if (notify != NULL)
+ ret = notify->fcn(events | missed, &item);
+#else
 ret = notify->fcn(events | missed, &item);
+#endif

 if (likely(ret == 0 && !missed))
 goto out_unlock;
@@ -257,6 +285,14 @@ out_unlock:
 rcu_read_unlock();
 }

+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
+#endif
+
 int nf_conntrack_register_notifier(struct net *net,
 struct nf_ct_event_notifier *new)
 {
@@ -279,6 +315,14 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
+#endif
+
 void nf_conntrack_unregister_notifier(struct net *net,
 struct nf_ct_event_notifier *new)
 {
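The effect of CONFIG_NF_CONNTRACK_CHAIN_EVENTS above is that conntrack events can fan out to several consumers (ctnetlink plus an offload engine) instead of the single nf_ct_event_notifier slot. A hedged sketch of how a module might attach to the new chain; nf_conntrack_register_chain_notifier(), nf_conntrack_unregister_chain_notifier() and struct nf_ct_event come from this patch, while the module scaffolding and the callback body are illustrative only:

/* Illustrative consumer of the conntrack event notifier chain added above. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static int example_ct_event(struct notifier_block *nb,
			    unsigned long events, void *ptr)
{
	struct nf_ct_event *item = ptr;

	/* React only to flows going away; ignore everything else. */
	if (events & (1 << IPCT_DESTROY))
		pr_debug("conntrack %p destroyed\n", item->ct);

	return NOTIFY_DONE;
}

static struct notifier_block example_ct_nb = {
	.notifier_call = example_ct_event,
};

static int __init example_init(void)
{
	/* Registration is per network namespace; init_net is used here. */
	return nf_conntrack_register_chain_notifier(&init_net, &example_ct_nb);
}

static void __exit example_exit(void)
{
	nf_conntrack_unregister_chain_notifier(&init_net, &example_ct_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

An offload engine would typically use such a callback to flush its flow entries on IPCT_DESTROY, and pair it with br_dev_update_stats() so packets it forwards on a bridge's behalf still show up in that bridge's counters.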
@@ -0,0 +1,82 @@
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -825,6 +825,9 @@
 __u8 decrypted:1;
 #endif
 __u8 gro_skip:1;
+#ifdef CONFIG_SHORTCUT_FE
+ __u8 fast_forwarded:1;
+#endif

 #ifdef CONFIG_NET_SCHED
 __u16 tc_index; /* traffic control index */
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
 int br_multicast_list_adjacent(struct net_device *dev,
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -473,3 +473,6 @@ config HAVE_CBPF_JIT
 # Extended BPF JIT (eBPF)
 config HAVE_EBPF_JIT
 bool
+
+config SHORTCUT_FE
+ bool "Enables kernel network stack path for Shortcut Forwarding Engine"
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3192,8 +3192,17 @@ static int xmit_one(struct sk_buff *skb,
 unsigned int len;
 int rc;

+#ifdef CONFIG_SHORTCUT_FE
+ /* If this skb has been fast forwarded then we don't want it to
+ * go to any taps (by definition we're trying to bypass them).
+ */
+ if (!skb->fast_forwarded) {
+#endif
 if (dev_nit_active(dev))
 dev_queue_xmit_nit(skb, dev);
+#ifdef CONFIG_SHORTCUT_FE
+ }
+#endif

 #ifdef CONFIG_ETHERNET_PACKET_MANGLE
 if (!dev->eth_mangle_tx ||
@@ -4684,6 +4693,11 @@ void netdev_rx_handler_unregister(struct
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

+#ifdef CONFIG_SHORTCUT_FE
+int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
+#endif
+
 /*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
@@ -4733,6 +4747,9 @@ static int __netif_receive_skb_core(stru
 bool deliver_exact = false;
 int ret = NET_RX_DROP;
 __be16 type;
+#ifdef CONFIG_SHORTCUT_FE
+ int (*fast_recv)(struct sk_buff *skb);
+#endif

 net_timestamp_check(!netdev_tstamp_prequeue, skb);

@@ -4773,6 +4790,16 @@ another_round:
 goto out;
 }

+#ifdef CONFIG_SHORTCUT_FE
+ fast_recv = rcu_dereference(athrs_fast_nat_recv);
+ if (fast_recv) {
+ if (fast_recv(skb)) {
+ ret = NET_RX_SUCCESS;
+ goto out;
+ }
+ }
+#endif
+
 if (skb_skip_tc_classify(skb))
 goto skip_classify;
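Taken together, the dev.c hooks above give a forwarding engine one interception point on receive (athrs_fast_nat_recv) and one flag on transmit (skb->fast_forwarded) to keep already-handled packets away from the taps. Roughly how a module wires itself in; only those two symbols come from the patch, the flow lookup and everything else is an illustrative stand-in, not the actual shortcut-fe code:

/* Illustrative offload module hooking the CONFIG_SHORTCUT_FE receive path. */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* Declared (and exported) in net/core/dev.c by this patch. */
extern int (*athrs_fast_nat_recv)(struct sk_buff *skb);

/* Hypothetical flow lookup; a real engine consults its connection table. */
static struct net_device *example_lookup_flow(struct sk_buff *skb)
{
	return NULL;	/* nothing is offloaded in this sketch */
}

/* Return 1 if the packet was consumed, 0 to fall through to the stack. */
static int example_fast_recv(struct sk_buff *skb)
{
	struct net_device *out = example_lookup_flow(skb);

	if (!out)
		return 0;

	skb->fast_forwarded = 1;	/* keep xmit_one() from feeding the taps */
	skb->dev = out;
	dev_queue_xmit(skb);
	return 1;
}

static int __init example_init(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, example_fast_recv);
	return 0;
}

static void __exit example_exit(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, NULL);
	/* Wait for readers in __netif_receive_skb_core() to finish. */
	synchronize_rcu();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");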