kernel: backport upstream mediatek WED changes
Reorder and update existing patches
Signed-off-by: Felix Fietkau <nbd@nbd.name>
(cherry picked from commit 6407ef8d2b)
[rmilecki: rebase & fix mt76 compilation]
Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Commit 11d88dee1c (parent afe2ddf827), committed by Rafał Miłecki
@@ -11,7 +11,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2098,6 +2098,8 @@ struct net_device {
@@ -2099,6 +2099,8 @@ struct net_device {
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;

@@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3097,8 +3097,8 @@ static irqreturn_t mtk_handle_irq_rx(int
@@ -3098,8 +3098,8 @@ static irqreturn_t mtk_handle_irq_rx(int

eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
@@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}

return IRQ_HANDLED;
@@ -3110,8 +3110,8 @@ static irqreturn_t mtk_handle_irq_tx(int
@@ -3111,8 +3111,8 @@ static irqreturn_t mtk_handle_irq_tx(int

eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
@@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}

return IRQ_HANDLED;
@@ -4885,6 +4885,8 @@ static int mtk_probe(struct platform_dev
@@ -4886,6 +4886,8 @@ static int mtk_probe(struct platform_dev
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);

@@ -16,7 +16,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1515,12 +1515,28 @@ static void mtk_wake_queue(struct mtk_et
@@ -1516,12 +1516,28 @@ static void mtk_wake_queue(struct mtk_et
}
}

@@ -45,7 +45,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
bool gso = false;
int tx_num;

@@ -1542,6 +1558,18 @@ static netdev_tx_t mtk_start_xmit(struct
@@ -1543,6 +1559,18 @@ static netdev_tx_t mtk_start_xmit(struct
return NETDEV_TX_BUSY;
}

@@ -64,7 +64,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/* TSO: fill MSS info in tcp checksum field */
if (skb_is_gso(skb)) {
if (skb_cow_head(skb, 0)) {
@@ -1557,8 +1585,14 @@ static netdev_tx_t mtk_start_xmit(struct
@@ -1558,8 +1586,14 @@ static netdev_tx_t mtk_start_xmit(struct
}
}

@@ -22,7 +22,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -722,6 +722,7 @@ static void mtk_mac_link_up(struct phyli
@@ -723,6 +723,7 @@ static void mtk_mac_link_up(struct phyli
MAC_MCR_FORCE_RX_FC);

/* Configure speed */
@@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
switch (speed) {
case SPEED_2500:
case SPEED_1000:
@@ -3290,6 +3291,9 @@ found:
@@ -3291,6 +3292,9 @@ found:
if (dp->index >= MTK_QDMA_NUM_QUEUES)
return NOTIFY_DONE;

@@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
@@ -781,7 +782,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe
@@ -835,7 +836,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe
skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
goto out;

@@ -1,266 +0,0 @@
|
||||
From: Felix Fietkau <nbd@nbd.name>
|
||||
Date: Mon, 20 Mar 2023 11:44:30 +0100
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: add code for offloading flows
|
||||
from wlan devices
|
||||
|
||||
WED version 2 (on MT7986 and later) can offload flows originating from wireless
|
||||
devices. In order to make that work, ndo_setup_tc needs to be implemented on
|
||||
the netdevs. This adds the required code to offload flows coming in from WED,
|
||||
while keeping track of the incoming wed index used for selecting the correct
|
||||
PPE device.
|
||||
|
||||
Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1448,6 +1448,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index);
+void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);

--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -235,7 +235,8 @@ out:
}

static int
-mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+ int ppe_index)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_action_entry *act;
@@ -452,6 +453,7 @@ mtk_flow_offload_replace(struct mtk_eth
entry->cookie = f->cookie;
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;
+ entry->ppe_index = ppe_index;

err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
if (err < 0)
@@ -520,25 +522,15 @@ mtk_flow_offload_stats(struct mtk_eth *e

static DEFINE_MUTEX(mtk_flow_offload_mutex);

-static int
-mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index)
{
- struct flow_cls_offload *cls = type_data;
- struct net_device *dev = cb_priv;
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
int err;

- if (!tc_can_offload(dev))
- return -EOPNOTSUPP;
-
- if (type != TC_SETUP_CLSFLOWER)
- return -EOPNOTSUPP;
-
mutex_lock(&mtk_flow_offload_mutex);
switch (cls->command) {
case FLOW_CLS_REPLACE:
- err = mtk_flow_offload_replace(eth, cls);
+ err = mtk_flow_offload_replace(eth, cls, ppe_index);
break;
case FLOW_CLS_DESTROY:
err = mtk_flow_offload_destroy(eth, cls);
@@ -556,6 +548,23 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_
}

static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *cls = type_data;
+ struct net_device *dev = cb_priv;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ return mtk_flow_offload_cmd(eth, cls, 0);
+}
+
+static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
struct mtk_mac *mac = netdev_priv(dev);
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -13,6 +13,8 @@
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
@@ -41,6 +43,11 @@
static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

+struct mtk_wed_flow_block_priv {
+ struct mtk_wed_hw *hw;
+ struct net_device *dev;
+};
+
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
@@ -1760,6 +1767,99 @@ out:
mutex_unlock(&hw_lock);
}

+static int
+mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct mtk_wed_flow_block_priv *priv = cb_priv;
+ struct flow_cls_offload *cls = type_data;
+ struct mtk_wed_hw *hw = priv->hw;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
+}
+
+static int
+mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
+ struct flow_block_offload *f)
+{
+ struct mtk_wed_flow_block_priv *priv;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+ struct mtk_eth *eth = hw->eth;
+ flow_setup_cb_t *cb;
+
+ if (!eth->soc->offload_version)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ cb = mtk_wed_setup_tc_block_cb;
+ f->driver_block_list = &block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hw = hw;
+ priv->dev = dev;
+ block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
+ if (IS_ERR(block_cb)) {
+ kfree(priv);
+ return PTR_ERR(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ kfree(block_cb->cb_priv);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct mtk_wed_hw *hw = wed->hw;
+
+ if (hw->version < 2)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return mtk_wed_setup_tc_block(hw, dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void __iomem *wdma, phys_addr_t wdma_phy,
int index)
@@ -1779,6 +1879,7 @@ void mtk_wed_add_hw(struct device_node *
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
+ .setup_tc = mtk_wed_setup_tc,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -6,6 +6,7 @@
#include <linux/regmap.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
+#include <linux/netdevice.h>

#define MTK_WED_TX_QUEUES 2
#define MTK_WED_RX_QUEUES 2
@@ -180,6 +181,8 @@ struct mtk_wed_ops {

u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+ int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data);
};

extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
@@ -238,6 +241,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
(_dev)->ops->msg_update(_dev, _id, _msg, _len)
#define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
+ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
#else
static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
{
@@ -256,6 +261,7 @@ static inline bool mtk_wed_device_active
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
#define mtk_wed_device_stop(_dev) do {} while (0)
#define mtk_wed_device_dma_reset(_dev) do {} while (0)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
#endif

#endif
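For reference, a minimal sketch (not part of this series) of how a wireless driver could wire its ndo_setup_tc callback to the setup_tc op exported above. The driver-side names example_wlan_setup_tc and netdev_to_wed are placeholders for whatever the driver uses to reach its struct mtk_wed_device; only the mtk_wed_device_* helpers come from the header changed here.

/* Sketch under the above assumptions: forward TC block/FT setup requests
 * from a WLAN netdev to WED. */
static int example_wlan_setup_tc(struct net_device *dev,
				 enum tc_setup_type type, void *type_data)
{
	struct mtk_wed_device *wed = netdev_to_wed(dev); /* driver-specific lookup */

	if (type != TC_SETUP_BLOCK && type != TC_SETUP_FT)
		return -EOPNOTSUPP;

	if (!mtk_wed_device_active(wed))
		return -EOPNOTSUPP;

	/* Resolves to mtk_wed_setup_tc(), which binds the flow block to the
	 * PPE selected by the WED hw index. */
	return mtk_wed_device_setup_tc(wed, dev, type, type_data);
}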
@@ -1,37 +0,0 @@
|
||||
From: Felix Fietkau <nbd@nbd.name>
|
||||
Date: Mon, 20 Mar 2023 15:37:55 +0100
|
||||
Subject: [PATCH] net: ethernet: mediatek: mtk_ppe: prefer newly added l2
|
||||
flows over existing ones
|
||||
|
||||
When a device is roaming between interfaces and a new flow entry is created,
|
||||
we should assume that its output device is more up to date than whatever
|
||||
entry existed already.
|
||||
|
||||
Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
|
||||
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
|
||||
@@ -663,10 +663,20 @@ void mtk_foe_entry_clear(struct mtk_ppe
|
||||
static int
|
||||
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
|
||||
{
|
||||
+ struct mtk_flow_entry *prev;
|
||||
+
|
||||
entry->type = MTK_FLOW_TYPE_L2;
|
||||
|
||||
- return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
|
||||
- mtk_flow_l2_ht_params);
|
||||
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
|
||||
+ mtk_flow_l2_ht_params);
|
||||
+ if (likely(!prev))
|
||||
+ return 0;
|
||||
+
|
||||
+ if (IS_ERR(prev))
|
||||
+ return PTR_ERR(prev);
|
||||
+
|
||||
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
|
||||
+ &entry->l2_node, mtk_flow_l2_ht_params);
|
||||
}
|
||||
|
||||
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
|
||||
@@ -1,331 +0,0 @@
|
||||
From: Felix Fietkau <nbd@nbd.name>
|
||||
Date: Thu, 23 Mar 2023 10:24:11 +0100
|
||||
Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of
|
||||
offloaded flows
|
||||
|
||||
Unify tracking of L2 and L3 flows. Use the generic list field in struct
|
||||
mtk_foe_entry for tracking L2 subflows. Preparation for improving
|
||||
flow accounting support.
|
||||
|
||||
Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -483,42 +483,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
return 0;
}

+static int
+mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry)
+{
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ return offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ return offsetof(struct mtk_foe_entry, ipv4.ib2);
+}
+
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
- struct mtk_foe_entry *data)
+ struct mtk_foe_entry *data, int len)
{
- int type, len;
-
if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
return false;

- type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
- if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
- len = offsetof(struct mtk_foe_entry, ipv6._rsv);
- else
- len = offsetof(struct mtk_foe_entry, ipv4.ib2);
-
return !memcmp(&entry->data.data, &data->data, len - 4);
}

static void
-__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ bool set_state)
{
- struct hlist_head *head;
struct hlist_node *tmp;

if (entry->type == MTK_FLOW_TYPE_L2) {
rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
mtk_flow_l2_ht_params);

- head = &entry->l2_flows;
- hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
- __mtk_foe_entry_clear(ppe, entry);
+ hlist_for_each_entry_safe(entry, tmp, &entry->l2_flows, l2_list)
+ __mtk_foe_entry_clear(ppe, entry, set_state);
return;
}

- hlist_del_init(&entry->list);
- if (entry->hash != 0xffff) {
+ if (entry->hash != 0xffff && set_state) {
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

hwe->ib1 &= ~MTK_FOE_IB1_STATE;
@@ -538,7 +539,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
return;

- hlist_del_init(&entry->l2_data.list);
+ hlist_del_init(&entry->l2_list);
+ hlist_del_init(&entry->list);
kfree(entry);
}

@@ -554,66 +556,55 @@ static int __mtk_foe_entry_idle_time(str
return now - timestamp;
}

+static bool
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry foe = {};
+ struct mtk_foe_entry *hwe;
+ u16 hash = entry->hash;
+ int len;
+
+ if (hash == 0xffff)
+ return false;
+
+ hwe = mtk_foe_get_entry(ppe, hash);
+ len = mtk_flow_entry_match_len(ppe->eth, &entry->data);
+ memcpy(&foe, hwe, len);
+
+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
+ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
+ return false;
+
+ entry->data.ib1 = foe.ib1;
+
+ return true;
+}
+
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
struct mtk_flow_entry *cur;
- struct mtk_foe_entry *hwe;
struct hlist_node *tmp;
int idle;

idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
- hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
int cur_idle;
- u32 ib1;
-
- hwe = mtk_foe_get_entry(ppe, cur->hash);
- ib1 = READ_ONCE(hwe->ib1);

- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
- cur->hash = 0xffff;
- __mtk_foe_entry_clear(ppe, cur);
+ if (!mtk_flow_entry_update(ppe, cur)) {
+ __mtk_foe_entry_clear(ppe, entry, false);
continue;
}

- cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1);
if (cur_idle >= idle)
continue;

idle = cur_idle;
entry->data.ib1 &= ~ib1_ts_mask;
- entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
- }
-}
-
-static void
-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-{
- struct mtk_foe_entry foe = {};
- struct mtk_foe_entry *hwe;
-
- spin_lock_bh(&ppe_lock);
-
- if (entry->type == MTK_FLOW_TYPE_L2) {
- mtk_flow_entry_update_l2(ppe, entry);
- goto out;
+ entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask;
}
-
- if (entry->hash == 0xffff)
- goto out;
-
- hwe = mtk_foe_get_entry(ppe, entry->hash);
- memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
- if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
- entry->hash = 0xffff;
- goto out;
- }
-
- entry->data.ib1 = foe.ib1;
-
-out:
- spin_unlock_bh(&ppe_lock);
}

static void
@@ -656,7 +647,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
spin_lock_bh(&ppe_lock);
- __mtk_foe_entry_clear(ppe, entry);
+ __mtk_foe_entry_clear(ppe, entry, true);
+ hlist_del_init(&entry->list);
spin_unlock_bh(&ppe_lock);
}

@@ -703,8 +695,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
{
const struct mtk_soc_data *soc = ppe->eth->soc;
struct mtk_flow_entry *flow_info;
- struct mtk_foe_entry foe = {}, *hwe;
struct mtk_foe_mac_info *l2;
+ struct mtk_foe_entry *hwe;
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;

@@ -712,30 +704,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
if (!flow_info)
return;

- flow_info->l2_data.base_flow = entry;
flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
flow_info->hash = hash;
hlist_add_head(&flow_info->list,
&ppe->foe_flow[hash / soc->hash_offset]);
- hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+ hlist_add_head(&flow_info->l2_list, &entry->l2_flows);

hwe = mtk_foe_get_entry(ppe, hash);
- memcpy(&foe, hwe, soc->foe_entry_size);
- foe.ib1 &= ib1_mask;
- foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+ memcpy(&flow_info->data, hwe, soc->foe_entry_size);
+ flow_info->data.ib1 &= ib1_mask;
+ flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask;

- l2 = mtk_foe_entry_l2(ppe->eth, &foe);
+ l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data);
memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

- type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
+ type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1);
if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
- memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig,
+ sizeof(flow_info->data.ipv4.new));
else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
l2->etype = ETH_P_IPV6;

- *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
+ *mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2;

- __mtk_foe_entry_commit(ppe, &foe, hash);
+ __mtk_foe_entry_commit(ppe, &flow_info->data, hash);
}

void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
@@ -745,9 +737,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
struct mtk_flow_entry *entry;
struct mtk_foe_bridge key = {};
+ struct mtk_foe_entry foe = {};
struct hlist_node *n;
struct ethhdr *eh;
bool found = false;
+ int entry_len;
u8 *tag;

spin_lock_bh(&ppe_lock);
@@ -755,20 +749,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
goto out;

- hlist_for_each_entry_safe(entry, n, head, list) {
- if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
- if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
- MTK_FOE_STATE_BIND))
- continue;
-
- entry->hash = 0xffff;
- __mtk_foe_entry_clear(ppe, entry);
- continue;
- }
+ entry_len = mtk_flow_entry_match_len(ppe->eth, hwe);
+ memcpy(&foe, hwe, entry_len);

- if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
+ hlist_for_each_entry_safe(entry, n, head, list) {
+ if (found ||
+ !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) {
if (entry->hash != 0xffff)
- entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry, false);
continue;
}

@@ -819,9 +807,17 @@ out:

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
- mtk_flow_entry_update(ppe, entry);
+ int idle;
+
+ spin_lock_bh(&ppe_lock);
+ if (entry->type == MTK_FLOW_TYPE_L2)
+ mtk_flow_entry_update_l2(ppe, entry);
+ else
+ mtk_flow_entry_update(ppe, entry);
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ spin_unlock_bh(&ppe_lock);

- return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ return idle;
}

int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -286,7 +286,12 @@ enum {

struct mtk_flow_entry {
union {
- struct hlist_node list;
+ /* regular flows + L2 subflows */
+ struct {
+ struct hlist_node list;
+ struct hlist_node l2_list;
+ };
+ /* L2 flows */
struct {
struct rhash_head l2_node;
struct hlist_head l2_flows;
@@ -296,13 +301,7 @@ struct mtk_flow_entry {
s8 wed_index;
u8 ppe_index;
u16 hash;
- union {
- struct mtk_foe_entry data;
- struct {
- struct mtk_flow_entry *base_flow;
- struct hlist_node list;
- } l2_data;
- };
+ struct mtk_foe_entry data;
struct rhash_head node;
unsigned long cookie;
};
@@ -1,342 +0,0 @@
|
||||
From: Felix Fietkau <nbd@nbd.name>
|
||||
Date: Thu, 23 Mar 2023 11:05:22 +0100
|
||||
Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2
|
||||
flows
|
||||
|
||||
For L2 flows, the packet/byte counters should report the sum of the
|
||||
counters of their subflows, both current and expired.
|
||||
In order to make this work, change the way that accounting data is tracked.
|
||||
Reset counters when a flow enters bind. Once it expires (or enters unbind),
|
||||
store the last counter value in struct mtk_flow_entry.
|
||||
|
||||
Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
||||
---
|
||||
|
||||
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct
int ret;
u32 val;

- ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
- !(val & MTK_PPE_MIB_SER_CR_ST),
- 20, MTK_PPE_WAIT_TIMEOUT_US);
+ ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val,
+ !(val & MTK_PPE_MIB_SER_CR_ST),
+ 20, MTK_PPE_WAIT_TIMEOUT_US);

if (ret)
dev_err(ppe->dev, "MIB table busy");
@@ -90,17 +90,31 @@ static int mtk_ppe_mib_wait_busy(struct
return ret;
}

-static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
+static inline struct mtk_foe_accounting *
+mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index)
+{
+ if (!ppe->acct_table)
+ return NULL;
+
+ return ppe->acct_table + index * sizeof(struct mtk_foe_accounting);
+}
+
+struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index)
{
u32 val, cnt_r0, cnt_r1, cnt_r2;
+ struct mtk_foe_accounting *acct;
int ret;

val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);

+ acct = mtk_ppe_acct_data(ppe, index);
+ if (!acct)
+ return NULL;
+
ret = mtk_ppe_mib_wait_busy(ppe);
if (ret)
- return ret;
+ return acct;

cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
@@ -109,19 +123,19 @@ static int mtk_mib_entry_read(struct mtk
if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
/* 64 bit for each counter */
u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
- *bytes = ((u64)cnt_r1 << 32) | cnt_r0;
- *packets = ((u64)cnt_r3 << 32) | cnt_r2;
+ acct->bytes += ((u64)cnt_r1 << 32) | cnt_r0;
+ acct->packets += ((u64)cnt_r3 << 32) | cnt_r2;
} else {
/* 48 bit byte counter, 40 bit packet counter */
u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
- *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
- *packets = (pkt_cnt_high << 16) | pkt_cnt_low;
+ acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low;
+ acct->packets += (pkt_cnt_high << 16) | pkt_cnt_low;
}

- return 0;
+ return acct;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
@@ -526,13 +540,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
mtk_ppe_cache_clear(ppe);
- if (ppe->accounting) {
- struct mtk_foe_accounting *acct;
-
- acct = ppe->acct_table + entry->hash * sizeof(*acct);
- acct->packets = 0;
- acct->bytes = 0;
- }
}
entry->hash = 0xffff;

@@ -557,11 +564,14 @@ static int __mtk_foe_entry_idle_time(str
}

static bool
-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u64 *packets, u64 *bytes)
{
+ struct mtk_foe_accounting *acct;
struct mtk_foe_entry foe = {};
struct mtk_foe_entry *hwe;
u16 hash = entry->hash;
+ bool ret = false;
int len;

if (hash == 0xffff)
@@ -572,18 +582,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
memcpy(&foe, hwe, len);

if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
- FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
- return false;
+ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) {
+ acct = mtk_ppe_acct_data(ppe, hash);
+ if (acct) {
+ entry->prev_packets += acct->packets;
+ entry->prev_bytes += acct->bytes;
+ }
+
+ goto out;
+ }

entry->data.ib1 = foe.ib1;
+ acct = mtk_ppe_mib_entry_read(ppe, hash);
+ ret = true;
+
+out:
+ if (acct) {
+ *packets += acct->packets;
+ *bytes += acct->bytes;
+ }

- return true;
+ return ret;
}

static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ u64 *packets = &entry->packets;
+ u64 *bytes = &entry->bytes;
struct mtk_flow_entry *cur;
struct hlist_node *tmp;
int idle;
@@ -592,7 +619,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
int cur_idle;

- if (!mtk_flow_entry_update(ppe, cur)) {
+ if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) {
+ entry->prev_packets += cur->prev_packets;
+ entry->prev_bytes += cur->prev_bytes;
__mtk_foe_entry_clear(ppe, entry, false);
continue;
}
@@ -607,10 +636,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
}
}

+void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ int *idle)
+{
+ entry->packets = entry->prev_packets;
+ entry->bytes = entry->prev_bytes;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2)
+ mtk_flow_entry_update_l2(ppe, entry);
+ else
+ mtk_flow_entry_update(ppe, entry, &entry->packets, &entry->bytes);
+
+ *idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+
+ spin_unlock_bh(&ppe_lock);
+}
+
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
u16 hash)
{
+ struct mtk_foe_accounting *acct;
struct mtk_eth *eth = ppe->eth;
u16 timestamp = mtk_eth_timestamp(eth);
struct mtk_foe_entry *hwe;
@@ -641,6 +689,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p

dma_wmb();

+ acct = mtk_ppe_mib_entry_read(ppe, hash);
+ if (acct) {
+ acct->packets = 0;
+ acct->bytes = 0;
+ }
+
mtk_ppe_cache_clear(ppe);
}

@@ -805,21 +859,6 @@ out:
spin_unlock_bh(&ppe_lock);
}

-int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-{
- int idle;
-
- spin_lock_bh(&ppe_lock);
- if (entry->type == MTK_FLOW_TYPE_L2)
- mtk_flow_entry_update_l2(ppe, entry);
- else
- mtk_flow_entry_update(ppe, entry);
- idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
- spin_unlock_bh(&ppe_lock);
-
- return idle;
-}
-
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
if (!ppe)
@@ -847,32 +886,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
return mtk_ppe_wait_busy(ppe);
}

-struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
- struct mtk_foe_accounting *diff)
-{
- struct mtk_foe_accounting *acct;
- int size = sizeof(struct mtk_foe_accounting);
- u64 bytes, packets;
-
- if (!ppe->accounting)
- return NULL;
-
- if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
- return NULL;
-
- acct = ppe->acct_table + index * size;
-
- acct->bytes += bytes;
- acct->packets += packets;
-
- if (diff) {
- diff->bytes = bytes;
- diff->packets = packets;
- }
-
- return acct;
-}
-
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
{
bool accounting = eth->soc->has_accounting;
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -304,6 +304,8 @@ struct mtk_flow_entry {
struct mtk_foe_entry data;
struct rhash_head node;
unsigned long cookie;
+ u64 prev_packets, prev_bytes;
+ u64 packets, bytes;
};

struct mtk_mib_entry {
@@ -347,6 +349,7 @@ void mtk_ppe_deinit(struct mtk_eth *eth)
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
+struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index);

void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);

@@ -395,9 +398,8 @@ int mtk_foe_entry_set_queue(struct mtk_e
unsigned int queue);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
-int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
-struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
- struct mtk_foe_accounting *diff);
+void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ int *idle);

#endif
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
if (bind && state != MTK_FOE_STATE_BIND)
continue;

- acct = mtk_foe_entry_get_mib(ppe, i, NULL);
+ acct = mtk_ppe_mib_entry_read(ppe, i);

type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
seq_printf(m, "%05x %s %7s", i,
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -499,24 +499,21 @@ static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
struct mtk_flow_entry *entry;
- struct mtk_foe_accounting diff;
- u32 idle;
+ u64 packets, bytes;
+ int idle;

entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
mtk_flow_ht_params);
if (!entry)
return -ENOENT;

- idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
+ packets = entry->packets;
+ bytes = entry->bytes;
+ mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle);
+ f->stats.pkts += entry->packets - packets;
+ f->stats.bytes += entry->bytes - bytes;
f->stats.lastused = jiffies - idle * HZ;

- if (entry->hash != 0xFFFF &&
- mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
- &diff)) {
- f->stats.pkts += diff.packets;
- f->stats.bytes += diff.bytes;
- }
-
return 0;
}

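A worked illustration of the accounting scheme above (the numbers are made up): suppose an L2 flow has two subflows. Subflow B expired on an earlier poll with 40 packets, which were folded into prev_packets; subflow A is still bound and its MIB counter has accumulated 100 packets since it entered bind (the counter is zeroed by __mtk_foe_entry_commit()). A stats poll via mtk_foe_entry_get_stats() therefore starts from prev_packets (40), adds subflow A's cumulative 100, and ends with entry->packets = 140; mtk_flow_offload_stats() then adds only the difference against the entry->packets value it cached on the previous poll into f->stats.pkts.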
@@ -249,7 +249,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
-
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -431,6 +431,30 @@ static void mtk_setup_bridge_switch(stru
@@ -432,6 +432,30 @@ static void mtk_setup_bridge_switch(stru
MTK_GSW_CFG);
}

@@ -280,7 +280,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
@@ -439,12 +463,20 @@ static struct phylink_pcs *mtk_mac_selec
@@ -440,12 +464,20 @@ static struct phylink_pcs *mtk_mac_selec
struct mtk_eth *eth = mac->hw;
unsigned int sid;

@@ -307,7 +307,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}

return NULL;
@@ -500,7 +532,22 @@ static void mtk_mac_config(struct phylin
@@ -501,7 +533,22 @@ static void mtk_mac_config(struct phylin
goto init_err;
}
break;
@@ -330,7 +330,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
break;
default:
goto err_phy;
@@ -555,8 +602,6 @@ static void mtk_mac_config(struct phylin
@@ -556,8 +603,6 @@ static void mtk_mac_config(struct phylin
val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
@@ -339,7 +339,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}

/* SGMII */
@@ -573,21 +618,40 @@ static void mtk_mac_config(struct phylin
@@ -574,21 +619,40 @@ static void mtk_mac_config(struct phylin

/* Save the syscfg0 value for mac_finish */
mac->syscfg0 = val;
@@ -387,7 +387,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
return;

err_phy:
@@ -632,10 +696,14 @@ static void mtk_mac_link_down(struct phy
@@ -633,10 +697,14 @@ static void mtk_mac_link_down(struct phy
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
@@ -405,7 +405,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}

static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
@@ -707,13 +775,11 @@ static void mtk_set_queue_speed(struct m
@@ -708,13 +776,11 @@ static void mtk_set_queue_speed(struct m
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}

@@ -423,7 +423,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
u32 mcr;

mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
@@ -747,6 +813,55 @@ static void mtk_mac_link_up(struct phyli
@@ -748,6 +814,55 @@ static void mtk_mac_link_up(struct phyli
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

@@ -479,7 +479,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = phylink_generic_validate,
.mac_select_pcs = mtk_mac_select_pcs,
@@ -4560,8 +4675,21 @@ static int mtk_add_mac(struct mtk_eth *e
@@ -4561,8 +4676,21 @@ static int mtk_add_mac(struct mtk_eth *e
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
@@ -501,7 +501,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
@@ -4754,6 +4882,13 @@ static int mtk_probe(struct platform_dev
@@ -4755,6 +4883,13 @@ static int mtk_probe(struct platform_dev

if (err)
return err;

@@ -32,7 +32,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4830,7 +4830,10 @@ static int mtk_probe(struct platform_dev
@@ -4831,7 +4831,10 @@ static int mtk_probe(struct platform_dev
}

if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {

@@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

/**
* napi_disable - prevent NAPI from scheduling
@@ -3363,6 +3364,7 @@ struct softnet_data {
@@ -3364,6 +3365,7 @@ struct softnet_data {
unsigned int processed;
unsigned int time_squeeze;
unsigned int received_rps;