Increase DMA burst size and tx ring size and optimize tx processing

Signed-off-by: Felix Fietkau <nbd@nbd.name>
		
			
				
	
	
		
			51 lines
		
	
	
		
			1.4 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
			
		
		
	
	
			51 lines
		
	
	
		
			1.4 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
From: Felix Fietkau <nbd@nbd.name>
 | 
						|
Date: Wed, 26 Aug 2020 16:55:54 +0200
 | 
						|
Subject: [PATCH] net: ethernet: mtk_eth_soc: fix unnecessary tx queue
 | 
						|
 stops
 | 
						|
 | 
						|
When running short on descriptors, only stop the queue for the netdev that tx
 | 
						|
was attempted for. By the time something tries to send on the other netdev,
 | 
						|
the ring might have some more room already.
 | 
						|
 | 
						|
Signed-off-by: Felix Fietkau <nbd@nbd.name>
 | 
						|
---
 | 
						|
 | 
						|
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 | 
						|
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 | 
						|
@@ -1126,17 +1126,6 @@ static void mtk_wake_queue(struct mtk_et
 | 
						|
 	}
 | 
						|
 }
 | 
						|
 
 | 
						|
-static void mtk_stop_queue(struct mtk_eth *eth)
 | 
						|
-{
 | 
						|
-	int i;
 | 
						|
-
 | 
						|
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
 | 
						|
-		if (!eth->netdev[i])
 | 
						|
-			continue;
 | 
						|
-		netif_stop_queue(eth->netdev[i]);
 | 
						|
-	}
 | 
						|
-}
 | 
						|
-
 | 
						|
 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 | 
						|
 {
 | 
						|
 	struct mtk_mac *mac = netdev_priv(dev);
 | 
						|
@@ -1157,7 +1146,7 @@ static int mtk_start_xmit(struct sk_buff
 | 
						|
 
 | 
						|
 	tx_num = mtk_cal_txd_req(skb);
 | 
						|
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
 | 
						|
-		mtk_stop_queue(eth);
 | 
						|
+		netif_stop_queue(dev);
 | 
						|
 		netif_err(eth, tx_queued, dev,
 | 
						|
 			  "Tx Ring full when queue awake!\n");
 | 
						|
 	spin_unlock(&eth->page_lock);
 | 
						|
@@ -1183,7 +1172,7 @@ static int mtk_start_xmit(struct sk_buff
 | 
						|
 		goto drop;
 | 
						|
 
 | 
						|
 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
 | 
						|
-		mtk_stop_queue(eth);
 | 
						|
+		netif_stop_queue(dev);
 | 
						|
 
 | 
						|
 	spin_unlock(&eth->page_lock);
 | 
						|
 
 |