kernel: pick patches for MediaTek Ethernet from linux-next

Pick patches for the mtk_eth_soc driver bringing several fixes and
improvements, preparation for the upcoming WED (TX) support [1], and
basic XDP support [2] for MediaTek's Filogic SoCs.

Also pick a follow-up patch fixing Ethernet on MT7621 [3].

Tested on Bananapi BPi-R3 (MT7986), Bananapi BPi-R64 (MT7622),
Bananapi BPi-R2 (MT7623), MikroTik RouterBoard M11G (MT7621).

[1]: https://patchwork.kernel.org/project/netdevbpf/list/?series=662108&state=*
[2]: https://patchwork.kernel.org/project/netdevbpf/list/?series=675368&state=*
     (the first part of the series adding wed nodes to mt7986a.dtsi was
      applied to the copy of mt7986a.dtsi in our tree)
[3]: https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/commit/?id=5e69163d3b9931098922b3fc2f8e786af8c1f37e

Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Author: Daniel Golle
Date:   2022-08-29 08:54:41 +02:00
Commit: c93c5365c0 (parent 524f52a471)

40 changed files with 5839 additions and 19 deletions


@@ -0,0 +1,161 @@
commit 8610037e8106b48c79cfe0afb92b2b2466e51c3d
Author: Joe Damato <jdamato@fastly.com>
Date: Tue Mar 1 23:55:47 2022 -0800
page_pool: Add allocation stats
Add per-pool statistics counters for the allocation path of a page pool.
These stats are incremented in softirq context, so no locking or per-cpu
variables are needed.
This code is disabled by default and a kernel config option is provided for
users who wish to enable them.
The statistics added are:
- fast: successful fast path allocations
- slow: slow path order-0 allocations
- slow_high_order: slow path high order allocations
- empty: ptr ring is empty, so a slow path allocation was forced.
- refill: an allocation which triggered a refill of the cache
- waive: pages obtained from the ptr ring that cannot be added to
the cache due to a NUMA mismatch.
Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -82,6 +82,19 @@ struct page_pool_params {
unsigned int offset; /* DMA addr offset */
};
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+ u64 fast; /* fast path allocations */
+ u64 slow; /* slow-path order 0 allocations */
+ u64 slow_high_order; /* slow-path high order allocations */
+ u64 empty; /* failed refills due to empty ptr ring, forcing
+ * slow path allocation
+ */
+ u64 refill; /* allocations via successful refill */
+ u64 waive; /* failed refills due to numa zone mismatch */
+};
+#endif
+
struct page_pool {
struct page_pool_params p;
@@ -132,6 +145,11 @@ struct page_pool {
refcount_t user_cnt;
u64 destroy_cnt;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* these stats are incremented while in softirq context */
+ struct page_pool_alloc_stats alloc_stats;
+#endif
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -434,6 +434,19 @@ config NET_DEVLINK
config PAGE_POOL
bool
+config PAGE_POOL_STATS
+ default n
+ bool "Page pool stats"
+ depends on PAGE_POOL
+ help
+ Enable page pool statistics to track page allocation and recycling
+ in page pools. This option incurs additional CPU cost in allocation
+ and recycle paths and additional memory cost to store the statistics.
+ These statistics are only available if this option is enabled and if
+ the driver using the page pool supports exporting this data.
+
+ If unsure, say N.
+
config FAILOVER
tristate "Generic failover module"
help
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -26,6 +26,13 @@
#define BIAS_MAX LONG_MAX
+#ifdef CONFIG_PAGE_POOL_STATS
+/* alloc_stat_inc is intended to be used in softirq context */
+#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
+#else
+#define alloc_stat_inc(pool, __stat)
+#endif
+
static int page_pool_init(struct page_pool *pool,
const struct page_pool_params *params)
{
@@ -117,8 +124,10 @@ static struct page *page_pool_refill_all
int pref_nid; /* preferred NUMA node */
/* Quicker fallback, avoid locks when ring is empty */
- if (__ptr_ring_empty(r))
+ if (__ptr_ring_empty(r)) {
+ alloc_stat_inc(pool, empty);
return NULL;
+ }
/* Softirq guarantee CPU and thus NUMA node is stable. This,
* assumes CPU refilling driver RX-ring will also run RX-NAPI.
@@ -148,14 +157,17 @@ static struct page *page_pool_refill_all
* This limit stress on page buddy alloactor.
*/
page_pool_return_page(pool, page);
+ alloc_stat_inc(pool, waive);
page = NULL;
break;
}
} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
/* Return last page */
- if (likely(pool->alloc.count > 0))
+ if (likely(pool->alloc.count > 0)) {
page = pool->alloc.cache[--pool->alloc.count];
+ alloc_stat_inc(pool, refill);
+ }
spin_unlock(&r->consumer_lock);
return page;
@@ -170,6 +182,7 @@ static struct page *__page_pool_get_cach
if (likely(pool->alloc.count)) {
/* Fast-path */
page = pool->alloc.cache[--pool->alloc.count];
+ alloc_stat_inc(pool, fast);
} else {
page = page_pool_refill_alloc_cache(pool);
}
@@ -241,6 +254,7 @@ static struct page *__page_pool_alloc_pa
return NULL;
}
+ alloc_stat_inc(pool, slow_high_order);
page_pool_set_pp_info(pool, page);
/* Track how many pages are held 'in-flight' */
@@ -295,10 +309,12 @@ static struct page *__page_pool_alloc_pa
}
/* Return last page */
- if (likely(pool->alloc.count > 0))
+ if (likely(pool->alloc.count > 0)) {
page = pool->alloc.cache[--pool->alloc.count];
- else
+ alloc_stat_inc(pool, slow);
+ } else {
page = NULL;
+ }
/* When page just alloc'ed is should/must have refcnt 1. */
return page;
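
As a quick sanity check before an accessor API exists (page_pool_get_stats()
only arrives two patches later), a debug patch could read the new fields
directly. A minimal sketch, assuming CONFIG_PAGE_POOL_STATS=y; the helper
name and format string are ours, not part of the patch:

    #ifdef CONFIG_PAGE_POOL_STATS
    /* hypothetical debug helper: dump a pool's allocation counters */
    static void dbg_pp_alloc_stats(const struct page_pool *pool)
    {
            pr_info("pp alloc: fast=%llu slow=%llu slow_ho=%llu empty=%llu refill=%llu waive=%llu\n",
                    pool->alloc_stats.fast, pool->alloc_stats.slow,
                    pool->alloc_stats.slow_high_order, pool->alloc_stats.empty,
                    pool->alloc_stats.refill, pool->alloc_stats.waive);
    }
    #endif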


@@ -0,0 +1,137 @@
commit ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad
Author: Joe Damato <jdamato@fastly.com>
Date: Tue Mar 1 23:55:48 2022 -0800
page_pool: Add recycle stats
Add per-cpu stats tracking page pool recycling events:
- cached: recycling placed page in the page pool cache
- cache_full: page pool cache was full
- ring: page placed into the ptr ring
- ring_full: page released from page pool because the ptr ring was full
- released_refcnt: page released (and not recycled) because refcnt > 1
Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
u64 refill; /* allocations via successful refill */
u64 waive; /* failed refills due to numa zone mismatch */
};
+
+struct page_pool_recycle_stats {
+ u64 cached; /* recycling placed page in the cache. */
+ u64 cache_full; /* cache was full */
+ u64 ring; /* recycling placed page back into ptr ring */
+ u64 ring_full; /* page was released from page-pool because
+ * PTR ring was full.
+ */
+ u64 released_refcnt; /* page released because of elevated
+ * refcnt
+ */
+};
#endif
struct page_pool {
@@ -136,6 +148,10 @@ struct page_pool {
*/
struct ptr_ring ring;
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* recycle stats are per-cpu to avoid locking */
+ struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
atomic_t pages_state_release_cnt;
/* A page_pool is strictly tied to a single RX-queue being
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -29,8 +29,15 @@
#ifdef CONFIG_PAGE_POOL_STATS
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
+/* recycle_stat_inc is safe to use when preemption is possible. */
+#define recycle_stat_inc(pool, __stat) \
+ do { \
+ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+ this_cpu_inc(s->__stat); \
+ } while (0)
#else
#define alloc_stat_inc(pool, __stat)
+#define recycle_stat_inc(pool, __stat)
#endif
static int page_pool_init(struct page_pool *pool,
@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po
pool->p.flags & PP_FLAG_PAGE_FRAG)
return -EINVAL;
+#ifdef CONFIG_PAGE_POOL_STATS
+ pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+ if (!pool->recycle_stats)
+ return -ENOMEM;
+#endif
+
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
return -ENOMEM;
@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st
else
ret = ptr_ring_produce_bh(&pool->ring, page);
- return (ret == 0) ? true : false;
+ if (!ret) {
+ recycle_stat_inc(pool, ring);
+ return true;
+ }
+
+ return false;
}
/* Only allow direct recycling in special circumstances, into the
@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st
static bool page_pool_recycle_in_cache(struct page *page,
struct page_pool *pool)
{
- if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
+ if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
+ recycle_stat_inc(pool, cache_full);
return false;
+ }
/* Caller MUST have verified/know (page_ref_count(page) == 1) */
pool->alloc.cache[pool->alloc.count++] = page;
+ recycle_stat_inc(pool, cached);
return true;
}
@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p
* doing refcnt based recycle tricks, meaning another process
* will be invoking put_page.
*/
+ recycle_stat_inc(pool, released_refcnt);
/* Do not replace this with page_pool_return_page() */
page_pool_release_page(pool, page);
put_page(page);
@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool
page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
if (page && !page_pool_recycle_in_ring(pool, page)) {
/* Cache full, fallback to free pages */
+ recycle_stat_inc(pool, ring_full);
page_pool_return_page(pool, page);
}
}
@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p
if (pool->p.flags & PP_FLAG_DMA_MAP)
put_device(pool->p.dev);
+#ifdef CONFIG_PAGE_POOL_STATS
+ free_percpu(pool->recycle_stats);
+#endif
kfree(pool);
}


@@ -0,0 +1,74 @@
commit 6b95e3388b1ea0ca63500c5a6e39162dbf828433
Author: Joe Damato <jdamato@fastly.com>
Date: Tue Mar 1 23:55:49 2022 -0800
page_pool: Add function to batch and return stats
Adds a function page_pool_get_stats which can be used by drivers to obtain
stats for a specified page_pool.
Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -105,6 +105,23 @@ struct page_pool_recycle_stats {
* refcnt
*/
};
+
+/* This struct wraps the above stats structs so users of the
+ * page_pool_get_stats API can pass a single argument when requesting the
+ * stats for the page pool.
+ */
+struct page_pool_stats {
+ struct page_pool_alloc_stats alloc_stats;
+ struct page_pool_recycle_stats recycle_stats;
+};
+
+/*
+ * Drivers that wish to harvest page pool stats and report them to users
+ * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
+ * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool.
+ */
+bool page_pool_get_stats(struct page_pool *pool,
+ struct page_pool_stats *stats);
#endif
struct page_pool {
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -35,6 +35,31 @@
struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
this_cpu_inc(s->__stat); \
} while (0)
+
+bool page_pool_get_stats(struct page_pool *pool,
+ struct page_pool_stats *stats)
+{
+ int cpu = 0;
+
+ if (!stats)
+ return false;
+
+ memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats));
+
+ for_each_possible_cpu(cpu) {
+ const struct page_pool_recycle_stats *pcpu =
+ per_cpu_ptr(pool->recycle_stats, cpu);
+
+ stats->recycle_stats.cached += pcpu->cached;
+ stats->recycle_stats.cache_full += pcpu->cache_full;
+ stats->recycle_stats.ring += pcpu->ring;
+ stats->recycle_stats.ring_full += pcpu->ring_full;
+ stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(page_pool_get_stats);
#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
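
Driver-side usage is then a single call. A sketch, where priv->page_pool
stands in for wherever the driver keeps its pool pointer:

    /* hypothetical: harvest stats for one pool, e.g. from a debugfs read */
    struct page_pool_stats stats = {};

    if (page_pool_get_stats(priv->page_pool, &stats))
            netdev_info(dev, "pp: fast=%llu empty=%llu cached=%llu ring=%llu\n",
                        stats.alloc_stats.fast, stats.alloc_stats.empty,
                        stats.recycle_stats.cached, stats.recycle_stats.ring);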


@@ -0,0 +1,53 @@
commit 590032a4d2133ecc10d3078a8db1d85a4842f12c
Author: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Mon Apr 11 16:05:26 2022 +0200
page_pool: Add recycle stats to page_pool_put_page_bulk
Add missing recycle stats to page_pool_put_page_bulk routine.
Reviewed-by: Joe Damato <jdamato@fastly.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Link: https://lore.kernel.org/r/3712178b51c007cfaed910ea80e68f00c916b1fa.1649685634.git.lorenzo@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -36,6 +36,12 @@
this_cpu_inc(s->__stat); \
} while (0)
+#define recycle_stat_add(pool, __stat, val) \
+ do { \
+ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+ this_cpu_add(s->__stat, val); \
+ } while (0)
+
bool page_pool_get_stats(struct page_pool *pool,
struct page_pool_stats *stats)
{
@@ -63,6 +69,7 @@ EXPORT_SYMBOL(page_pool_get_stats);
#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
+#define recycle_stat_add(pool, __stat, val)
#endif
static int page_pool_init(struct page_pool *pool,
@@ -569,9 +576,13 @@ void page_pool_put_page_bulk(struct page
/* Bulk producer into ptr_ring page_pool cache */
page_pool_ring_lock(pool);
for (i = 0; i < bulk_len; i++) {
- if (__ptr_ring_produce(&pool->ring, data[i]))
- break; /* ring full */
+ if (__ptr_ring_produce(&pool->ring, data[i])) {
+ /* ring full */
+ recycle_stat_inc(pool, ring_full);
+ break;
+ }
}
+ recycle_stat_add(pool, ring, i);
page_pool_ring_unlock(pool);
/* Hopefully all pages was return into ptr_ring */
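
page_pool_put_page_bulk() takes an array of buffer virtual addresses and
moves them into the ptr ring under one lock acquisition; with this patch the
whole batch is accounted with a single recycle_stat_add(). A usage sketch
(the collection logic is hypothetical):

    /* hypothetical: return a batch of page_pool buffers in one transaction */
    void *buffers[16];
    int n = 0;

    /* ... collect up to 16 buffer virtual addresses into buffers[] ... */
    page_pool_put_page_bulk(pool, buffers, n);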


@@ -0,0 +1,144 @@
commit f3c5264f452a5b0ac1de1f2f657efbabdea3c76a
Author: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Tue Apr 12 18:31:58 2022 +0200
net: page_pool: introduce ethtool stats
Introduce page_pool APIs to report stats through ethtool and reduce
duplicated code in each driver.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -115,6 +115,10 @@ struct page_pool_stats {
struct page_pool_recycle_stats recycle_stats;
};
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
/*
* Drivers that wish to harvest page pool stats and report them to users
* (perhaps via ethtool, debugfs, or another mechanism) can allocate a
@@ -122,6 +126,23 @@ struct page_pool_stats {
*/
bool page_pool_get_stats(struct page_pool *pool,
struct page_pool_stats *stats);
+#else
+
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+ return 0;
+}
+
+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ return data;
+}
+
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+ return data;
+}
+
#endif
struct page_pool {
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -18,6 +18,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */
#include <linux/poison.h>
+#include <linux/ethtool.h>
#include <trace/events/page_pool.h>
@@ -42,6 +43,20 @@
this_cpu_add(s->__stat, val); \
} while (0)
+static const char pp_stats[][ETH_GSTRING_LEN] = {
+ "rx_pp_alloc_fast",
+ "rx_pp_alloc_slow",
+ "rx_pp_alloc_slow_ho",
+ "rx_pp_alloc_empty",
+ "rx_pp_alloc_refill",
+ "rx_pp_alloc_waive",
+ "rx_pp_recycle_cached",
+ "rx_pp_recycle_cache_full",
+ "rx_pp_recycle_ring",
+ "rx_pp_recycle_ring_full",
+ "rx_pp_recycle_released_ref",
+};
+
bool page_pool_get_stats(struct page_pool *pool,
struct page_pool_stats *stats)
{
@@ -50,7 +65,13 @@ bool page_pool_get_stats(struct page_poo
if (!stats)
return false;
- memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats));
+ /* The caller is responsible to initialize stats. */
+ stats->alloc_stats.fast += pool->alloc_stats.fast;
+ stats->alloc_stats.slow += pool->alloc_stats.slow;
+ stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
+ stats->alloc_stats.empty += pool->alloc_stats.empty;
+ stats->alloc_stats.refill += pool->alloc_stats.refill;
+ stats->alloc_stats.waive += pool->alloc_stats.waive;
for_each_possible_cpu(cpu) {
const struct page_pool_recycle_stats *pcpu =
@@ -66,6 +87,46 @@ bool page_pool_get_stats(struct page_poo
return true;
}
EXPORT_SYMBOL(page_pool_get_stats);
+
+u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
+ memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ return data;
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
+
+int page_pool_ethtool_stats_get_count(void)
+{
+ return ARRAY_SIZE(pp_stats);
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
+
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+ struct page_pool_stats *pool_stats = stats;
+
+ *data++ = pool_stats->alloc_stats.fast;
+ *data++ = pool_stats->alloc_stats.slow;
+ *data++ = pool_stats->alloc_stats.slow_high_order;
+ *data++ = pool_stats->alloc_stats.empty;
+ *data++ = pool_stats->alloc_stats.refill;
+ *data++ = pool_stats->alloc_stats.waive;
+ *data++ = pool_stats->recycle_stats.cached;
+ *data++ = pool_stats->recycle_stats.cache_full;
+ *data++ = pool_stats->recycle_stats.ring;
+ *data++ = pool_stats->recycle_stats.ring_full;
+ *data++ = pool_stats->recycle_stats.released_refcnt;
+
+ return data;
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get);
+
#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
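
With these three helpers, per-driver ethtool boilerplate collapses to a few
lines. A sketch of the intended wiring, assuming a hypothetical drv_priv
with a single page pool (a driver with several pools would accumulate them
all into one struct page_pool_stats first):

    /* hypothetical ethtool glue for a driver with a single page pool */
    static int drv_get_sset_count(struct net_device *dev, int sset)
    {
            return sset == ETH_SS_STATS ? page_pool_ethtool_stats_get_count()
                                        : -EOPNOTSUPP;
    }

    static void drv_get_strings(struct net_device *dev, u32 sset, u8 *data)
    {
            if (sset == ETH_SS_STATS)
                    page_pool_ethtool_stats_get_strings(data);
    }

    static void drv_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
    {
            struct drv_priv *priv = netdev_priv(dev);
            struct page_pool_stats pp_stats = {};

            page_pool_get_stats(priv->page_pool, &pp_stats);
            page_pool_ethtool_stats_get(data, &pp_stats);
    }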


@@ -0,0 +1,97 @@
commit 2e88d4ff03013937028f5397268b21e10cf68713
Author: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri Jan 21 11:09:45 2022 +0100
xdp: introduce flags field in xdp_buff/xdp_frame
Introduce flags field in xdp_frame and xdp_buffer data structures
to define additional buffer features. At the moment the only
supported buffer feature is frags bit (XDP_FLAGS_HAS_FRAGS).
frags bit is used to specify if this is a linear buffer
(XDP_FLAGS_HAS_FRAGS not set) or a frags frame (XDP_FLAGS_HAS_FRAGS
set). In the latter case the driver is expected to initialize the
skb_shared_info structure at the end of the first buffer to link together
subsequent buffers belonging to the same frame.
Acked-by: Toke Hoiland-Jorgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/e389f14f3a162c0a5bc6a2e1aa8dd01a90be117d.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -66,6 +66,10 @@ struct xdp_txq_info {
struct net_device *dev;
};
+enum xdp_buff_flags {
+ XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */
+};
+
struct xdp_buff {
void *data;
void *data_end;
@@ -74,13 +78,30 @@ struct xdp_buff {
struct xdp_rxq_info *rxq;
struct xdp_txq_info *txq;
u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
+ u32 flags; /* supported values defined in xdp_buff_flags */
};
+static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
+{
+ return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
+}
+
+static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
+{
+ xdp->flags |= XDP_FLAGS_HAS_FRAGS;
+}
+
+static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
+{
+ xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
+}
+
static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
xdp->frame_sz = frame_sz;
xdp->rxq = rxq;
+ xdp->flags = 0;
}
static __always_inline void
@@ -122,8 +143,14 @@ struct xdp_frame {
*/
struct xdp_mem_info mem;
struct net_device *dev_rx; /* used by cpumap */
+ u32 flags; /* supported values defined in xdp_buff_flags */
};
+static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
+{
+ return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
+}
+
#define XDP_BULK_QUEUE_SIZE 16
struct xdp_frame_bulk {
int count;
@@ -180,6 +207,7 @@ void xdp_convert_frame_to_buff(struct xd
xdp->data_end = frame->data + frame->len;
xdp->data_meta = frame->data - frame->metasize;
xdp->frame_sz = frame->frame_sz;
+ xdp->flags = frame->flags;
}
static inline
@@ -206,6 +234,7 @@ int xdp_update_frame_from_buff(struct xd
xdp_frame->headroom = headroom - sizeof(*xdp_frame);
xdp_frame->metasize = metasize;
xdp_frame->frame_sz = xdp->frame_sz;
+ xdp_frame->flags = xdp->flags;
return 0;
}
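
From the driver's point of view the contract is: initialize the buff (which
clears the flags), then set the frags bit only once further buffers are
actually chained via the skb_shared_info at the end of the first one. A
sketch with hypothetical buffer variables:

    /* hypothetical RX path: build an xdp_buff that may carry frags */
    xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);  /* flags = 0 */
    xdp_prepare_buff(&xdp, data, headroom, first_seg_len, true);

    if (extra_frags) {
            /* skb_shared_info at the buffer end links the other segments */
            xdp_buff_set_frags_flag(&xdp);
    }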


@@ -0,0 +1,134 @@
commit 7c48cb0176c6d6d3b55029f7ff4ffa05faee6446
Author: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri Jan 21 11:09:50 2022 +0100
xdp: add frags support to xdp_return_{buff/frame}
Take into account if the received xdp_buff/xdp_frame is non-linear
recycling/returning the frame memory to the allocator or into
xdp_frame_bulk.
Acked-by: Toke Hoiland-Jorgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/a961069febc868508ce1bdf5e53a343eb4e57cb2.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -275,10 +275,24 @@ void __xdp_release_frame(void *data, str
static inline void xdp_release_frame(struct xdp_frame *xdpf)
{
struct xdp_mem_info *mem = &xdpf->mem;
+ struct skb_shared_info *sinfo;
+ int i;
/* Curr only page_pool needs this */
- if (mem->type == MEM_TYPE_PAGE_POOL)
- __xdp_release_frame(xdpf->data, mem);
+ if (mem->type != MEM_TYPE_PAGE_POOL)
+ return;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&sinfo->frags[i]);
+
+ __xdp_release_frame(page_address(page), mem);
+ }
+out:
+ __xdp_release_frame(xdpf->data, mem);
}
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -376,12 +376,38 @@ static void __xdp_return(void *data, str
void xdp_return_frame(struct xdp_frame *xdpf)
{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&sinfo->frags[i]);
+
+ __xdp_return(page_address(page), &xdpf->mem, false, NULL);
+ }
+out:
__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&sinfo->frags[i]);
+
+ __xdp_return(page_address(page), &xdpf->mem, true, NULL);
+ }
+out:
__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
@@ -417,7 +443,7 @@ void xdp_return_frame_bulk(struct xdp_fr
struct xdp_mem_allocator *xa;
if (mem->type != MEM_TYPE_PAGE_POOL) {
- __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
+ xdp_return_frame(xdpf);
return;
}
@@ -436,12 +462,38 @@ void xdp_return_frame_bulk(struct xdp_fr
bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
}
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ struct skb_shared_info *sinfo;
+ int i;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ bq->q[bq->count++] = skb_frag_address(frag);
+ if (bq->count == XDP_BULK_QUEUE_SIZE)
+ xdp_flush_frame_bulk(bq);
+ }
+ }
bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
void xdp_return_buff(struct xdp_buff *xdp)
{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&sinfo->frags[i]);
+
+ __xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
+ }
+out:
__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
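
The bulk API keeps its usual init/return/flush calling convention; fragment
pages are simply queued into the same bulk array. A usage sketch (loop
bounds and frame array are hypothetical):

    /* hypothetical TX-completion loop using the bulk return API;
     * runs in NAPI/softirq context
     */
    struct xdp_frame_bulk bq;
    int i;

    xdp_frame_bulk_init(&bq);
    for (i = 0; i < n_completed; i++)
            xdp_return_frame_bulk(frames[i], &bq);
    xdp_flush_frame_bulk(&bq);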


@@ -0,0 +1,29 @@
commit d16697cb6261d4cc23422e6b1cb2759df8aa76d0
Author: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri Jan 21 11:09:44 2022 +0100
net: skbuff: add size metadata to skb_shared_info for xdp
Introduce xdp_frags_size field in skb_shared_info data structure
to store xdp_buff/xdp_frame frame paged size (xdp_frags_size will
be used in xdp frags support). In order to not increase
skb_shared_info size we will use a hole due to skb_shared_info
alignment.
Acked-by: Toke Hoiland-Jorgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/8a849819a3e0a143d540f78a3a5add76e17e980d.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -567,6 +567,7 @@ struct skb_shared_info {
* Warning : all fields before dataref are cleared in __alloc_skb()
*/
atomic_t dataref;
+ unsigned int xdp_frags_size;
/* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
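
A multi-buffer capable driver keeps this field in sync while chaining
fragments. A sketch, with page/offset/len standing in for per-descriptor
values; the helpers are the stock skb_frag accessors of this kernel
generation:

    /* hypothetical: append one fragment to a multi-buffer xdp_buff */
    struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
    skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];

    skb_frag_off_set(frag, offset);
    skb_frag_size_set(frag, len);
    __skb_frag_set_page(frag, page);
    sinfo->xdp_frags_size += len;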


@@ -0,0 +1,62 @@
commit 5142239a22219921a7863cf00c9ab853c00689d8
Author: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri Mar 11 10:14:18 2022 +0100
net: veth: Account total xdp_frame len running ndo_xdp_xmit
Even if this is a theoretical issue since it is not possible to perform
XDP_REDIRECT on a non-linear xdp_frame, veth driver does not account
paged area in ndo_xdp_xmit function pointer.
Introduce xdp_get_frame_len utility routine to get the xdp_frame full
length and account total frame size running XDP_REDIRECT of a
non-linear xdp frame into a veth device.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Toke Hoiland-Jorgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/54f9fd3bb65d190daf2c0bbae2f852ff16cfbaa0.1646989407.git.lorenzo@kernel.org
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -501,7 +501,7 @@ static int veth_xdp_xmit(struct net_devi
struct xdp_frame *frame = frames[i];
void *ptr = veth_xdp_to_ptr(frame);
- if (unlikely(frame->len > max_len ||
+ if (unlikely(xdp_get_frame_len(frame) > max_len ||
__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
@@ -862,7 +862,7 @@ static int veth_xdp_rcv(struct veth_rq *
/* ndo_xdp_xmit */
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
- stats->xdp_bytes += frame->len;
+ stats->xdp_bytes += xdp_get_frame_len(frame);
frame = veth_xdp_rcv_one(rq, frame, bq, stats);
if (frame) {
/* XDP_PASS */
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -295,6 +295,20 @@ out:
__xdp_release_frame(xdpf->data, mem);
}
+static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
+{
+ struct skb_shared_info *sinfo;
+ unsigned int len = xdpf->len;
+
+ if (likely(!xdp_frame_has_frags(xdpf)))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ len += sinfo->xdp_frags_size;
+out:
+ return len;
+}
+
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
struct net_device *dev, u32 queue_index, unsigned int napi_id);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);


@@ -0,0 +1,35 @@
commit 7cda76d858a4e71ac4a04066c093679a12e1312c
Author: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri Mar 11 10:14:20 2022 +0100
veth: Allow jumbo frames in xdp mode
Allow increasing the MTU over page boundaries on veth devices
if the attached xdp program declares to support xdp fragments.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/d5dc039c3d4123426e7023a488c449181a7bc57f.1646989407.git.lorenzo@kernel.org
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1470,9 +1470,14 @@ static int veth_xdp_set(struct net_devic
goto err;
}
- max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
- peer->hard_header_len -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
+ peer->hard_header_len;
+ /* Allow increasing the max_mtu if the program supports
+ * XDP fragments.
+ */
+ if (prog->aux->xdp_has_frags)
+ max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
+
if (peer->mtu > max_mtu) {
NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
err = -ERANGE;
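
To put rough numbers on the new limit (a sketch assuming 4 KiB pages,
XDP_PACKET_HEADROOM of 256 bytes, SKB_DATA_ALIGN(sizeof(struct
skb_shared_info)) of about 320 bytes on 64-bit, a 14-byte Ethernet header
and MAX_SKB_FRAGS == 17; all of these vary by architecture and config):

    base max_mtu = SKB_WITH_OVERHEAD(4096 - 256) - 14
                 ~ (3840 - 320) - 14 = 3506 bytes
    with frags:    3506 + 17 * 4096 = 73138 bytes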


@@ -0,0 +1,330 @@
From 23233e577ef973c2c5d0dd757a0a4605e34ecb57 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 22 Jul 2022 09:19:36 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on page_pool for single page
buffers
Rely on page_pool allocator for single page buffers in order to keep
them dma mapped and add skb recycling support.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/mediatek/Kconfig | 1 +
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 185 +++++++++++++++-----
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 ++
3 files changed, 156 insertions(+), 40 deletions(-)
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -16,6 +16,7 @@ config NET_MEDIATEK_SOC
depends on NET_DSA || !NET_DSA
select PHYLINK
select DIMLIB
+ select PAGE_POOL
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1388,6 +1388,68 @@ static void mtk_update_rx_cpu_idx(struct
}
}
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+
+ err = xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ eth->rx_napi.napi_id);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ return pp;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+ return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+ if (ring->page_pool)
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(data), napi);
+ else
+ skb_free_frag(data);
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
@@ -1401,9 +1463,9 @@ static int mtk_poll_rx(struct napi_struc
while (done < budget) {
unsigned int pktlen, *rxdcsum;
+ u32 hash, reason, reserve_len;
struct net_device *netdev;
dma_addr_t dma_addr;
- u32 hash, reason;
int mac = 0;
ring = mtk_get_rx_ring(eth);
@@ -1434,36 +1496,54 @@ static int mtk_poll_rx(struct napi_struc
goto release_desc;
/* alloc new buffer */
- if (ring->frag_size <= PAGE_SIZE)
- new_data = napi_alloc_frag(ring->frag_size);
- else
- new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(eth->dma_dev,
- new_data + NET_SKB_PAD +
- eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
- skb_free_frag(new_data);
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
+ if (ring->page_pool) {
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
- dma_unmap_single(eth->dma_dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+ }
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
- skb_free_frag(data);
+ mtk_rx_put_buff(ring, data, true);
netdev->stats.rx_dropped++;
goto skip_rx;
}
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+ if (ring->page_pool) {
+ reserve_len = MTK_PP_HEADROOM;
+ skb_mark_for_recycle(skb);
+ } else {
+ reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+ }
+ skb_reserve(skb, reserve_len);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
@@ -1517,7 +1597,6 @@ static int mtk_poll_rx(struct napi_struc
skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
-
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
@@ -1525,7 +1604,6 @@ release_desc:
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
-
done++;
}
@@ -1889,13 +1967,15 @@ static int mtk_rx_alloc(struct mtk_eth *
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < rx_dma_size; i++) {
- if (ring->frag_size <= PAGE_SIZE)
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
- else
- ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
- if (!ring->data[i])
- return -ENOMEM;
+ if (!eth->hwlro) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+ rx_dma_size);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ ring->page_pool = pp;
}
ring->dma = dma_alloc_coherent(eth->dma_dev,
@@ -1906,16 +1986,33 @@ static int mtk_rx_alloc(struct mtk_eth *
for (i = 0; i < rx_dma_size; i++) {
struct mtk_rx_dma_v2 *rxd;
-
- dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ dma_addr_t dma_addr;
+ void *data;
rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ data = netdev_alloc_frag(ring->frag_size);
+ else
+ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr)))
+ return -ENOMEM;
+ }
rxd->rxd1 = (unsigned int)dma_addr;
+ ring->data[i] = data;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
@@ -1931,6 +2028,7 @@ static int mtk_rx_alloc(struct mtk_eth *
rxd->rxd8 = 0;
}
}
+
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
@@ -1982,7 +2080,7 @@ static void mtk_rx_clean(struct mtk_eth
dma_unmap_single(eth->dma_dev, rxd->rxd1,
ring->buf_size, DMA_FROM_DEVICE);
- skb_free_frag(ring->data[i]);
+ mtk_rx_put_buff(ring, ring->data[i], false);
}
kfree(ring->data);
ring->data = NULL;
@@ -1994,6 +2092,13 @@ static void mtk_rx_clean(struct mtk_eth
ring->dma, ring->phys);
ring->dma = NULL;
}
+
+ if (ring->page_pool) {
+ if (xdp_rxq_info_is_reg(&ring->xdp_q))
+ xdp_rxq_info_unreg(&ring->xdp_q);
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,6 +18,8 @@
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
#define MTK_QDMA_PAGE_SIZE 2048
@@ -49,6 +51,11 @@
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
+#define MTK_PP_PAD (MTK_PP_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
+
#define MTK_QRX_OFFSET 0x10
#define MTK_MAX_RX_RING_NUM 4
@@ -742,6 +749,9 @@ struct mtk_rx_ring {
bool calc_idx_update;
u16 calc_idx;
u32 crx_idx_reg;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_q;
};
enum mkt_eth_capabilities {
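
With the constants above, the usable receive buffer per page works out as
follows (assuming 4 KiB pages and the ~320-byte aligned skb_shared_info of
a 64-bit build):

    MTK_PP_PAD          = MTK_PP_HEADROOM + shinfo = 256 + 320 = 576
    MTK_PP_MAX_BUF_SIZE = 4096 - 576 = 3520 bytes

so a standard 1500-byte MTU fits with plenty of headroom, and the XDP
patches that follow reject MTUs above this bound.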


@@ -0,0 +1,291 @@
From 7c26c20da5d420cde55618263be4aa2f6de53056 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 22 Jul 2022 09:19:37 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: add basic XDP support
Introduce basic XDP support to mtk_eth_soc driver.
Supported XDP verdicts:
- XDP_PASS
- XDP_DROP
- XDP_REDIRECT
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 162 +++++++++++++++++---
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +
2 files changed, 145 insertions(+), 19 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1388,6 +1388,11 @@ static void mtk_update_rx_cpu_idx(struct
}
}
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+ return !eth->hwlro;
+}
+
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
struct xdp_rxq_info *xdp_q,
int id, int size)
@@ -1450,11 +1455,52 @@ static void mtk_rx_put_buff(struct mtk_r
skb_free_frag(data);
}
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+{
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+
+ prog = rcu_dereference(eth->prog);
+ if (!prog)
+ goto out;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ goto out;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+ goto out;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
+ bool xdp_flush = false;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
@@ -1463,9 +1509,9 @@ static int mtk_poll_rx(struct napi_struc
while (done < budget) {
unsigned int pktlen, *rxdcsum;
- u32 hash, reason, reserve_len;
struct net_device *netdev;
dma_addr_t dma_addr;
+ u32 hash, reason;
int mac = 0;
ring = mtk_get_rx_ring(eth);
@@ -1495,8 +1541,14 @@ static int mtk_poll_rx(struct napi_struc
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
/* alloc new buffer */
if (ring->page_pool) {
+ struct page *page = virt_to_head_page(data);
+ struct xdp_buff xdp;
+ u32 ret;
+
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
GFP_ATOMIC);
@@ -1504,6 +1556,34 @@ static int mtk_poll_rx(struct napi_struc
netdev->stats.rx_dropped++;
goto release_desc;
}
+
+ dma_sync_single_for_cpu(eth->dma_dev,
+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+ pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+ false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+ if (ret == XDP_REDIRECT)
+ xdp_flush = true;
+
+ if (ret != XDP_PASS)
+ goto skip_rx;
+
+ skb = build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_put_full_page(ring->page_pool,
+ page, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
+ skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
new_data = napi_alloc_frag(ring->frag_size);
@@ -1527,27 +1607,20 @@ static int mtk_poll_rx(struct napi_struc
dma_unmap_single(eth->dma_dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
- }
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- mtk_rx_put_buff(ring, data, true);
- netdev->stats.rx_dropped++;
- goto skip_rx;
- }
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ netdev->stats.rx_dropped++;
+ skb_free_frag(data);
+ goto skip_rx;
+ }
- if (ring->page_pool) {
- reserve_len = MTK_PP_HEADROOM;
- skb_mark_for_recycle(skb);
- } else {
- reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, pktlen);
}
- skb_reserve(skb, reserve_len);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
- skb_put(skb, pktlen);
+ bytes += skb->len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
rxdcsum = &trxd.rxd3;
@@ -1559,7 +1632,6 @@ static int mtk_poll_rx(struct napi_struc
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- bytes += pktlen;
hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
if (hash != MTK_RXD4_FOE_ENTRY) {
@@ -1622,6 +1694,9 @@ rx_done:
&dim_sample);
net_dim(&eth->rx_dim, dim_sample);
+ if (xdp_flush)
+ xdp_do_flush_map();
+
return done;
}
@@ -1967,7 +2042,7 @@ static int mtk_rx_alloc(struct mtk_eth *
if (!ring->data)
return -ENOMEM;
- if (!eth->hwlro) {
+ if (mtk_page_pool_enabled(eth)) {
struct page_pool *pp;
pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
@@ -2707,6 +2782,48 @@ static int mtk_stop(struct net_device *d
return 0;
}
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct bpf_prog *old_prog;
+ bool need_update;
+
+ if (eth->hwlro) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!eth->prog != !!prog;
+ if (netif_running(dev) && need_update)
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev) && need_update)
+ return mtk_open(dev);
+
+ return 0;
+}
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
@@ -3002,6 +3119,12 @@ static int mtk_change_mtu(struct net_dev
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new;
+ if (rcu_access_pointer(eth->prog) &&
+ length > MTK_PP_MAX_BUF_SIZE) {
+ netdev_err(dev, "Invalid MTU for XDP mode\n");
+ return -EINVAL;
+ }
+
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
@@ -3329,6 +3452,7 @@ static const struct net_device_ops mtk_n
.ndo_poll_controller = mtk_poll_controller,
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1085,6 +1085,8 @@ struct mtk_eth {
struct mtk_ppe *ppe;
struct rhashtable flow_table;
+
+ struct bpf_prog __rcu *prog;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
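
Any minimal program exercises the new ndo_bpf hook, e.g. this BPF-C sketch
(file and section names are ours):

    /* xdp_pass.c - hypothetical smoke test for the new hook */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_pass(struct xdp_md *ctx)
    {
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";

which can then be attached with:

    ip link set dev eth0 xdp obj xdp_pass.o sec xdp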


@@ -0,0 +1,110 @@
From 916a6ee836d6b7b8ef1ed5f0515e256ca60e9968 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 22 Jul 2022 09:19:38 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters
Report xdp stats through ethtool
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 26 +++++++++++++++++++--
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12 ++++++++++
2 files changed, 36 insertions(+), 2 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -34,6 +34,10 @@ MODULE_PARM_DESC(msg_level, "Message lev
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
+
static const struct mtk_reg_map mtk_reg_map = {
.tx_irq_mask = 0x1a1c,
.tx_irq_status = 0x1a18,
@@ -141,6 +145,13 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_long_errors),
MTK_ETHTOOL_STAT(rx_checksum_errors),
MTK_ETHTOOL_STAT(rx_flow_control_packets),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
static const char * const mtk_clks_source_name[] = {
@@ -1458,6 +1469,9 @@ static void mtk_rx_put_buff(struct mtk_r
static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
struct xdp_buff *xdp, struct net_device *dev)
{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
struct bpf_prog *prog;
u32 act = XDP_PASS;
@@ -1470,13 +1484,16 @@ static u32 mtk_xdp_run(struct mtk_eth *e
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
- goto out;
+ count = &hw_stats->xdp_stats.rx_xdp_pass;
+ goto update_stats;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
act = XDP_DROP;
break;
}
- goto out;
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
@@ -1489,6 +1506,11 @@ static u32 mtk_xdp_run(struct mtk_eth *e
page_pool_put_full_page(ring->page_pool,
virt_to_head_page(xdp->data), true);
+
+update_stats:
+ u64_stats_update_begin(&hw_stats->syncp);
+ *count = *count + 1;
+ u64_stats_update_end(&hw_stats->syncp);
out:
rcu_read_unlock();
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -567,6 +567,16 @@ struct mtk_tx_dma_v2 {
struct mtk_eth;
struct mtk_mac;
+struct mtk_xdp_stats {
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_pass;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_errors;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_xmit_errors;
+};
+
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
* @stats_lock: make sure that stats operations are atomic
* @reg_offset: the status register offset of the SoC
@@ -590,6 +600,8 @@ struct mtk_hw_stats {
u64 rx_checksum_errors;
u64 rx_flow_control_packets;
+ struct mtk_xdp_stats xdp_stats;
+
spinlock_t stats_lock;
u32 reg_offset;
struct u64_stats_sync syncp;
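
Once applied, ethtool -S lists the new counters alongside the existing MAC
statistics, under the names defined by the MTK_ETHTOOL_XDP_STAT() entries
above: rx_xdp_redirect, rx_xdp_pass, rx_xdp_drop, rx_xdp_tx,
rx_xdp_tx_errors, tx_xdp_xmit and tx_xdp_xmit_errors.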


@@ -0,0 +1,340 @@
From 5886d26fd25bbe26130e3e5f7474b9b3e98a3469 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 22 Jul 2022 09:19:39 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: add xmit XDP support
Introduce XDP support for XDP_TX verdict and ndo_xdp_xmit function
pointer.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 192 +++++++++++++++++---
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 +-
2 files changed, 180 insertions(+), 22 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -987,15 +987,26 @@ static void mtk_tx_unmap(struct mtk_eth
}
}
- tx_buf->flags = 0;
- if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
- if (napi)
- napi_consume_skb(tx_buf->skb, napi);
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ if (tx_buf->data &&
+ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+ }
+ } else if (tx_buf->data) {
+ struct xdp_frame *xdpf = tx_buf->data;
+
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
else
- dev_kfree_skb_any(tx_buf->skb);
+ xdp_return_frame(xdpf);
}
- tx_buf->skb = NULL;
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
}
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -1012,7 +1023,7 @@ static void setup_tx_buf(struct mtk_eth
dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len1, size);
} else {
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd->txd1 = mapped_addr;
txd->txd2 = TX_DMA_PLEN0(size);
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -1188,7 +1199,7 @@ static int mtk_tx_map(struct sk_buff *sk
soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
@@ -1202,7 +1213,8 @@ static int mtk_tx_map(struct sk_buff *sk
}
/* store skb to cleanup */
- itx_buf->skb = skb;
+ itx_buf->type = MTK_TYPE_SKB;
+ itx_buf->data = skb;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
@@ -1414,13 +1426,14 @@ static struct page_pool *mtk_create_page
.pool_size = size,
.nid = NUMA_NO_NODE,
.dev = eth->dma_dev,
- .dma_dir = DMA_FROM_DEVICE,
.offset = MTK_PP_HEADROOM,
.max_len = MTK_PP_MAX_BUF_SIZE,
};
struct page_pool *pp;
int err;
+ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
+ : DMA_FROM_DEVICE;
pp = page_pool_create(&pp_params);
if (IS_ERR(pp))
return pp;
@@ -1466,6 +1479,122 @@ static void mtk_rx_put_buff(struct mtk_r
skb_free_frag(data);
}
+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+ .last = true,
+ };
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd, *txd_pdma;
+ int err = 0, index = 0, n_desc = 1;
+ struct mtk_tx_buf *tx_buf;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+ if (unlikely(atomic_read(&ring->free_count) <= 1))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
+ txd_info.size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
+ err = -ENOMEM;
+ goto out;
+ }
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(xdpf->data);
+
+ txd_info.addr = page_pool_get_dma_addr(page) +
+ sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
+ txd_info.size,
+ DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
+ index++);
+
+ /* store xdpf for cleanup */
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int idx;
+
+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+out:
+ spin_unlock(&eth->page_lock);
+
+ return err;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+ break;
+ nxmit++;
+ }
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+ u64_stats_update_end(&hw_stats->syncp);
+
+ return nxmit;
+}
+
static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
struct xdp_buff *xdp, struct net_device *dev)
{
@@ -1494,6 +1623,18 @@ static u32 mtk_xdp_run(struct mtk_eth *e
count = &hw_stats->xdp_stats.rx_xdp_redirect;
goto update_stats;
+ case XDP_TX: {
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_tx;
+ goto update_stats;
+ }
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
@@ -1727,9 +1868,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->last_free_ptr;
@@ -1750,15 +1890,21 @@ static int mtk_poll_tx_qdma(struct mtk_e
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB &&
+ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ struct sk_buff *skb = tx_buf->data;
+
bytes[mac] += skb->len;
done[mac]++;
budget--;
+ } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+ tx_buf->type == MTK_TYPE_XDP_NDO) {
+ budget--;
}
+
mtk_tx_unmap(eth, tx_buf, true);
ring->last_free = desc;
@@ -1777,9 +1923,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
@@ -1787,14 +1932,18 @@ static int mtk_poll_tx_pdma(struct mtk_e
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB &&
+ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ struct sk_buff *skb = tx_buf->data;
bytes[0] += skb->len;
done[0]++;
budget--;
+ } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+ tx_buf->type == MTK_TYPE_XDP_NDO) {
+ budget--;
}
mtk_tx_unmap(eth, tx_buf, true);
@@ -3475,6 +3624,7 @@ static const struct net_device_ops mtk_n
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
.ndo_bpf = mtk_xdp,
+ .ndo_xdp_xmit = mtk_xdp_xmit,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -693,6 +693,12 @@ enum mtk_dev_state {
MTK_RESETTING
};
+enum mtk_tx_buf_type {
+ MTK_TYPE_SKB,
+ MTK_TYPE_XDP_TX,
+ MTK_TYPE_XDP_NDO,
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptor s
* @skb: The SKB pointer of the packet being sent
@@ -702,7 +708,9 @@ enum mtk_dev_state {
* @dma_len1: The length of the second segment
*/
struct mtk_tx_buf {
- struct sk_buff *skb;
+ enum mtk_tx_buf_type type;
+ void *data;
+
u32 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
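
A note on the ndo_xdp_xmit contract that mtk_xdp_xmit implements above: the
return value is the number of frames the driver accepted, and ownership of
any remaining frames stays with the caller. A minimal sketch of a caller
under that contract (the xmit_all() helper is hypothetical; the
net_device_ops hook and XDP helpers are the upstream ones):

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical caller: transmit n frames and free whatever the driver
 * did not accept, much like the devmap redirect path does. Assumes the
 * device actually implements ndo_xdp_xmit. */
static int xmit_all(struct net_device *dev, struct xdp_frame **frames, int n)
{
	int i, sent;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
	if (sent < 0)
		sent = 0;	/* treat errors as "nothing sent" */

	/* unsent frames must be released by the caller */
	for (i = sent; i < n; i++)
		xdp_return_frame(frames[i]);

	return sent;
}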


@@ -0,0 +1,95 @@
From 84b9cd389036d4a262d8cee794d56c04095358a7 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 22 Jul 2022 09:19:40 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for
page_pool_get_stats
Introduce support for the page_pool stats API in the mtk_eth_soc driver.
Report page_pool stats through ethtool.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/mediatek/Kconfig | 1 +
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 37 +++++++++++++++++++--
2 files changed, 35 insertions(+), 3 deletions(-)
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
select PHYLINK
select DIMLIB
select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3485,11 +3485,18 @@ static void mtk_get_strings(struct net_d
int i;
switch (stringset) {
- case ETH_SS_STATS:
+ case ETH_SS_STATS: {
+ struct mtk_mac *mac = netdev_priv(dev);
+
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ if (mtk_page_pool_enabled(mac->hw))
+ page_pool_ethtool_stats_get_strings(data);
+ break;
+ }
+ default:
break;
}
}
@@ -3497,13 +3504,35 @@ static void mtk_get_strings(struct net_d
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(mtk_ethtool_stats);
+ case ETH_SS_STATS: {
+ int count = ARRAY_SIZE(mtk_ethtool_stats);
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (mtk_page_pool_enabled(mac->hw))
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+ }
default:
return -EOPNOTSUPP;
}
}
+static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
+ struct mtk_rx_ring *ring = &eth->rx_ring[i];
+
+ if (!ring->page_pool)
+ continue;
+
+ page_pool_get_stats(ring->page_pool, &stats);
+ }
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mtk_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -3531,6 +3560,8 @@ static void mtk_get_ethtool_stats(struct
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+ if (mtk_page_pool_enabled(mac->hw))
+ mtk_ethtool_pp_stats(mac->hw, data_dst);
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
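
With CONFIG_PAGE_POOL_STATS enabled, the counters exported by this patch
appear next to the driver's existing ethtool statistics and can be read
with "ethtool -S eth0" (interface name is an example); the upstream
page_pool helpers report them under names such as rx_pp_alloc_fast,
rx_pp_alloc_refill and rx_pp_recycle_ring. Note that page_pool_get_stats()
accumulates into the caller-supplied struct, which is why
mtk_ethtool_pp_stats() can reuse a single page_pool_stats across all RX
rings.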


@@ -0,0 +1,113 @@
From b16fe6d82b71fa0dd5c957bc22d66a694976d6eb Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Wed, 27 Jul 2022 23:20:50 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce mtk_xdp_frame_map
utility routine
This is a preliminary patch to add XDP multi-frag support to the
mtk_eth_soc driver.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 68 +++++++++++++--------
1 file changed, 42 insertions(+), 26 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1479,6 +1479,41 @@ static void mtk_rx_put_buff(struct mtk_r
skb_free_frag(data);
}
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+ struct mtk_tx_dma_desc_info *txd_info,
+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+ void *data, u16 headroom, int index, bool dma_map)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd_pdma;
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info->addr = dma_map_single(eth->dma_dev, data,
+ txd_info->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+ return -ENOMEM;
+
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(data);
+
+ txd_info->addr = page_pool_get_dma_addr(page) +
+ sizeof(struct xdp_frame) + headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+ txd_info->size, DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+ index);
+
+ return 0;
+}
+
static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
struct net_device *dev, bool dma_map)
{
@@ -1489,9 +1524,8 @@ static int mtk_xdp_submit_frame(struct m
.first = true,
.last = true,
};
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_tx_dma *txd, *txd_pdma;
int err = 0, index = 0, n_desc = 1;
+ struct mtk_tx_dma *txd, *txd_pdma;
struct mtk_tx_buf *tx_buf;
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
@@ -1511,36 +1545,18 @@ static int mtk_xdp_submit_frame(struct m
tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
memset(tx_buf, 0, sizeof(*tx_buf));
- if (dma_map) { /* ndo_xdp_xmit */
- txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
- txd_info.size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
- err = -ENOMEM;
- goto out;
- }
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- } else {
- struct page *page = virt_to_head_page(xdpf->data);
-
- txd_info.addr = page_pool_get_dma_addr(page) +
- sizeof(*xdpf) + xdpf->headroom;
- dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
- txd_info.size,
- DMA_BIDIRECTIONAL);
- }
- mtk_tx_set_dma_desc(dev, txd, &txd_info);
-
- tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
-
- txd_pdma = qdma_to_pdma(ring, txd);
- setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
- index++);
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ xdpf->data, xdpf->headroom, index,
+ dma_map);
+ if (err < 0)
+ goto out;
/* store xdpf for cleanup */
tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
tx_buf->data = xdpf;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ txd_pdma = qdma_to_pdma(ring, txd);
if (index & 1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
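
The dma_map argument of the new helper preserves the two cases that were
previously open-coded: frames handed in through ndo_xdp_xmit may live in
arbitrary memory and therefore get a fresh dma_map_single(), while XDP_TX
frames come out of the driver's own page_pool, whose pages are already
DMA-mapped, so only a dma_sync_single_for_device() is needed before
transmit.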


@@ -0,0 +1,218 @@
From 155738a4f319538a09f734ce1f5a2eac3ada1de2 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Wed, 27 Jul 2022 23:20:51 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp multi-frag support
Add the capability to map non-linear XDP frames in the XDP_TX path and
the ndo_xdp_xmit callback.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 125 +++++++++++++-------
1 file changed, 82 insertions(+), 43 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -987,23 +987,22 @@ static void mtk_tx_unmap(struct mtk_eth
}
}
- if (tx_buf->type == MTK_TYPE_SKB) {
- if (tx_buf->data &&
- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
struct sk_buff *skb = tx_buf->data;
if (napi)
napi_consume_skb(skb, napi);
else
dev_kfree_skb_any(skb);
- }
- } else if (tx_buf->data) {
- struct xdp_frame *xdpf = tx_buf->data;
+ } else {
+ struct xdp_frame *xdpf = tx_buf->data;
- if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
- xdp_return_frame_rx_napi(xdpf);
- else
- xdp_return_frame(xdpf);
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
+ }
}
tx_buf->flags = 0;
tx_buf->data = NULL;
@@ -1506,6 +1505,8 @@ static int mtk_xdp_frame_map(struct mtk_
mtk_tx_set_dma_desc(dev, txd, txd_info);
tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd_pdma = qdma_to_pdma(ring, txd);
setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
@@ -1517,43 +1518,69 @@ static int mtk_xdp_frame_map(struct mtk_
static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
struct net_device *dev, bool dma_map)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma_desc_info txd_info = {
.size = xdpf->len,
.first = true,
- .last = true,
+ .last = !xdp_frame_has_frags(xdpf),
};
- int err = 0, index = 0, n_desc = 1;
- struct mtk_tx_dma *txd, *txd_pdma;
- struct mtk_tx_buf *tx_buf;
+ int err, index = 0, n_desc = 1, nr_frags;
+ struct mtk_tx_dma *htxd, *txd, *txd_pdma;
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+ void *data = xdpf->data;
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
return -EBUSY;
- if (unlikely(atomic_read(&ring->free_count) <= 1))
+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
return -EBUSY;
spin_lock(&eth->page_lock);
txd = ring->next_free;
if (txd == ring->last_free) {
- err = -ENOMEM;
- goto out;
+ spin_unlock(&eth->page_lock);
+ return -ENOMEM;
}
+ htxd = txd;
tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
- err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
- xdpf->data, xdpf->headroom, index,
- dma_map);
- if (err < 0)
- goto out;
+ for (;;) {
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ data, xdpf->headroom, index, dma_map);
+ if (err < 0)
+ goto unmap;
+
+ if (txd_info.last)
+ break;
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = skb_frag_size(&sinfo->frags[index]);
+ txd_info.last = index + 1 == nr_frags;
+ data = skb_frag_address(&sinfo->frags[index]);
+
+ index++;
+ }
/* store xdpf for cleanup */
- tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
- tx_buf->data = xdpf;
+ htx_buf->data = xdpf;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
txd_pdma = qdma_to_pdma(ring, txd);
@@ -1580,7 +1607,24 @@ static int mtk_xdp_submit_frame(struct m
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
-out:
+
+ spin_unlock(&eth->page_lock);
+
+ return 0;
+
+unmap:
+ while (htxd != txd) {
+ txd_pdma = qdma_to_pdma(ring, htxd);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ mtk_tx_unmap(eth, tx_buf, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+ }
+
spin_unlock(&eth->page_lock);
return err;
@@ -1909,18 +1953,15 @@ static int mtk_poll_tx_qdma(struct mtk_e
if (!tx_buf->data)
break;
- if (tx_buf->type == MTK_TYPE_SKB &&
- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- struct sk_buff *skb = tx_buf->data;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
- bytes[mac] += skb->len;
- done[mac]++;
- budget--;
- } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
- tx_buf->type == MTK_TYPE_XDP_NDO) {
+ bytes[mac] += skb->len;
+ done[mac]++;
+ }
budget--;
}
-
mtk_tx_unmap(eth, tx_buf, true);
ring->last_free = desc;
@@ -1951,17 +1992,15 @@ static int mtk_poll_tx_pdma(struct mtk_e
if (!tx_buf->data)
break;
- if (tx_buf->type == MTK_TYPE_SKB &&
- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- struct sk_buff *skb = tx_buf->data;
- bytes[0] += skb->len;
- done[0]++;
- budget--;
- } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
- tx_buf->type == MTK_TYPE_XDP_NDO) {
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[0] += skb->len;
+ done[0]++;
+ }
budget--;
}
-
mtk_tx_unmap(eth, tx_buf, true);
desc = ring->dma + cpu * eth->soc->txrx.txd_size;
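
The frag walk added above follows the standard multi-buffer XDP layout:
the head buffer is described by xdpf->len, and any further fragments sit
in the skb_shared_info area of the frame. A self-contained sketch using
only the upstream helpers (the length-summing function itself is
illustrative, not part of the driver):

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Illustrative: total payload length of a possibly non-linear
 * xdp_frame, walking the frags the same way mtk_xdp_submit_frame()
 * does when it sizes TX descriptors. */
static unsigned int xdp_frame_total_len(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int i, nr_frags, len = xdpf->len;

	nr_frags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;
	for (i = 0; i < nr_frags; i++)
		len += skb_frag_size(&sinfo->frags[i]);

	return len;	/* head + all fragments */
}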


@@ -18,9 +18,9 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1469,10 +1469,19 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1845,10 +1845,19 @@ static int mtk_poll_rx(struct napi_struc
skb->dev = netdev;
skb_put(skb, pktlen);
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
@@ -40,9 +40,9 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
-@@ -1481,16 +1490,9 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1856,16 +1865,9 @@ static int mtk_poll_rx(struct napi_struc
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
bytes += pktlen;
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
@@ -60,7 +60,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -307,6 +307,11 @@
+@@ -314,6 +314,11 @@
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_SPECIAL_TAG BIT(22)
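
(The hunk above introduces no code change of its own: it refreshes an
existing patch in the tree whose hunk offsets and context moved after the
XDP changes earlier in this series shifted mtk_poll_rx and the header
defines.)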


@@ -0,0 +1,33 @@
From c9daab322313087afde8c46f41df3c628410ae20 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Mon, 5 Sep 2022 14:46:01 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: remove mtk_foe_entry_timestamp
Get rid of the mtk_foe_entry_timestamp routine since it is no longer used.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/mediatek/mtk_ppe.h | 11 -----------
1 file changed, 11 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -302,17 +302,6 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s
__mtk_ppe_check_skb(ppe, skb, hash);
}
-static inline int
-mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
-{
- u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
-
- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
- return -1;
-
- return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
-}
-
int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
u8 pse_port, u8 *src_mac, u8 *dest_mac);
int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);


@@ -0,0 +1,35 @@
From 5e69163d3b9931098922b3fc2f8e786af8c1f37e Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Tue, 13 Sep 2022 15:03:05 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: enable XDP support just for
MT7986 SoC
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Disable page_pool/XDP support for the MT7621 SoC in order to fix a
regression introduced by adding XDP support for the MT7986 SoC. There is
no real use case for XDP on MT7621 since it is a low-end CPU. Moreover,
this patch reduces the memory footprint.
Tested-by: Sergio Paracuellos <sergio.paracuellos@gmail.com>
Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
Fixes: 23233e577ef9 ("net: ethernet: mtk_eth_soc: rely on page_pool for single page buffers")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/2bf31e27b888c43228b0d84dd2ef5033338269e2.1663074002.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1412,7 +1412,7 @@ static void mtk_update_rx_cpu_idx(struct
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
- return !eth->hwlro;
+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,