Add patches for linux-5.4. The patches are from the NXP LSDK-20.04 release,
which was tagged LSDK-20.04-V5.4.

https://source.codeaurora.org/external/qoriq/qoriq-components/linux/

For the LS1021A-IOT and Traverse-LS1043 boards, which are not covered by
LSDK, port the dts patches from 4.14.

The patches are sorted into the following categories:

	301-arch-xxxx
	302-dts-xxxx
	303-core-xxxx
	701-net-xxxx
	801-audio-xxxx
	802-can-xxxx
	803-clock-xxxx
	804-crypto-xxxx
	805-display-xxxx
	806-dma-xxxx
	807-gpio-xxxx
	808-i2c-xxxx
	809-jailhouse-xxxx
	810-keys-xxxx
	811-kvm-xxxx
	812-pcie-xxxx
	813-pm-xxxx
	814-qe-xxxx
	815-sata-xxxx
	816-sdhc-xxxx
	817-spi-xxxx
	818-thermal-xxxx
	819-uart-xxxx
	820-usb-xxxx
	821-vfio-xxxx

Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>

From 0c7cb8b132f28cd150eb578a73c959de736364a2 Mon Sep 17 00:00:00 2001
From: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Date: Mon, 16 Sep 2019 13:15:02 +0300
Subject: [PATCH] dpaa2-eth: Update FQ taildrop threshold and buffer pool count

Now that we have congestion group taildrop configured at all
times, we can afford to increase the frame queue taildrop
threshold; this will ensure a better response when receiving
bursts of large-sized frames.

Also decouple the buffer pool count from the Rx FQ taildrop
threshold, as the above change would increase it too much. Instead,
keep the old count as a hardcoded value.

With the new limits, we try to ensure that:
* we allow enough leeway for large frame bursts (by buffering
enough of them in queues to avoid heavy dropping in case of
bursty traffic, but when overall ingress bandwidth is manageable)
* allow pending frames to be evenly spread between ingress FQs,
regardless of frame size
* avoid dropping frames due to the buffer pool being empty; this
is not a bad behaviour per se, but system overall response is
more linear and predictable when frames are dropped at frame
queue/group level.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
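For reference, the new hardcoded buffer count below matches what the removed
formula computed, so the "old count" really is kept. A minimal standalone
check of the arithmetic (the OLD_*/NEW_* names are illustrative only, not
part of the driver):

#include <stdio.h>

/* Pre-patch values, as removed in the hunk below */
#define OLD_FQ_TAILDROP_THRESH		(64 * 1024)
#define OLD_MAX_FRAMES_PER_QUEUE	(OLD_FQ_TAILDROP_THRESH / 64)	/* 1024 */
#define OLD_NUM_BUFS			(OLD_MAX_FRAMES_PER_QUEUE + 256)
/* Post-patch value, hardcoded in the hunk below */
#define NEW_NUM_BUFS			1280

int main(void)
{
	/* 64K / 64 + 256 == 1280: the buffer quota itself is unchanged */
	printf("old=%d new=%d\n", OLD_NUM_BUFS, NEW_NUM_BUFS);
	return 0;
}
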
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -35,24 +35,24 @@
 /* Convert L3 MTU to L2 MFL */
 #define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)
 
-/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
- * frames in the Rx queues (length of the current frame is not
- * taken into account when making the taildrop decision)
+/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
+ * enough number of jumbo frames in the Rx queues (length of the current
+ * frame is not taken into account when making the taildrop decision)
  */
-#define DPAA2_ETH_FQ_TAILDROP_THRESH	(64 * 1024)
+#define DPAA2_ETH_FQ_TAILDROP_THRESH	(1024 * 1024)
 
 /* Maximum number of Tx confirmation frames to be processed
  * in a single NAPI call
  */
 #define DPAA2_ETH_TXCONF_PER_NAPI	256
 
-/* Buffer quota per queue. Must be large enough such that for minimum sized
- * frames taildrop kicks in before the bpool gets depleted, so we compute
- * how many 64B frames fit inside the taildrop threshold and add a margin
- * to accommodate the buffer refill delay.
+/* Buffer quota per channel. We want to keep in check number of ingress frames
+ * in flight: for small sized frames, congestion group taildrop may kick in
+ * first; for large sizes, Rx FQ taildrop threshold will ensure only a
+ * reasonable number of frames will be pending at any given time.
+ * Ingress frame drop due to buffer pool depletion should be a corner case only
  */
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_FQ_TAILDROP_THRESH / 64)
-#define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_NUM_BUFS		1280
 #define DPAA2_ETH_REFILL_THRESH \
 	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
 
@@ -62,8 +62,7 @@
  * taildrop kicks in
  */
 #define DPAA2_ETH_CG_TAILDROP_THRESH(priv)				\
-	(DPAA2_ETH_MAX_FRAMES_PER_QUEUE * dpaa2_eth_queue_count(priv) /	\
-	 dpaa2_eth_tc_count(priv))
+	(1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))
 
 /* Maximum number of buffers that can be acquired/released through a single
  * QBMan command
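
The congestion group limit above is likewise numerically unchanged: the
removed DPAA2_ETH_MAX_FRAMES_PER_QUEUE factor evaluated to 64K / 64 = 1024,
which the new version simply inlines as a literal. A small sketch of the
equivalence, using hypothetical queue and traffic class counts (the real
driver obtains these at runtime via dpaa2_eth_queue_count() and
dpaa2_eth_tc_count()):

#include <stdio.h>

/* Hypothetical per-interface counts, for illustration only */
static const int queue_count = 8;
static const int tc_count = 1;

int main(void)
{
	/* Old: DPAA2_ETH_MAX_FRAMES_PER_QUEUE frames per queue (64K / 64) */
	int old_thresh = (64 * 1024 / 64) * queue_count / tc_count;
	/* New: the same 1024 frames per queue, written as a literal */
	int new_thresh = 1024 * queue_count / tc_count;

	printf("old=%d new=%d\n", old_thresh, new_thresh);	/* both 8192 */
	return 0;
}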