cddd459140
Add patches for linux-5.4. The patches are from the NXP LSDK-20.04 release
(tagged LSDK-20.04-V5.4):
https://source.codeaurora.org/external/qoriq/qoriq-components/linux/

For the LS1021A-IOT and Traverse-LS1043 boards, which are not part of LSDK,
the dts patches are ported from 4.14.

The patches are sorted into the following categories:

  301-arch-xxxx
  302-dts-xxxx
  303-core-xxxx
  701-net-xxxx
  801-audio-xxxx
  802-can-xxxx
  803-clock-xxxx
  804-crypto-xxxx
  805-display-xxxx
  806-dma-xxxx
  807-gpio-xxxx
  808-i2c-xxxx
  809-jailhouse-xxxx
  810-keys-xxxx
  811-kvm-xxxx
  812-pcie-xxxx
  813-pm-xxxx
  814-qe-xxxx
  815-sata-xxxx
  816-sdhc-xxxx
  817-spi-xxxx
  818-thermal-xxxx
  819-uart-xxxx
  820-usb-xxxx
  821-vfio-xxxx

Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
From 26d3cc476c26832e1e05db182ac27906f6c81f2d Mon Sep 17 00:00:00 2001
From: Camelia Groza <camelia.groza@nxp.com>
Date: Tue, 29 Oct 2019 16:12:18 +0200
Subject: [PATCH] sdk_dpaa: ls1043a errata: memory related fixes

Avoid a crash by verifying the allocation return status.

Use the standard API for determining the page order needed for
allocating Jumbo sized skbs.

Explicitly remove the old skb outside the w/a, for both successful and
unsuccessful realignments. Make sure the old skb's memory isn't leaked.

Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
---
 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c  | 30 ++++++++++++++--------
 1 file changed, 19 insertions(+), 11 deletions(-)

--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
@@ -809,8 +809,8 @@ static struct sk_buff *a010022_realign_s
 {
 	int trans_offset = skb_transport_offset(skb);
 	int net_offset = skb_network_offset(skb);
-	int nsize, headroom, npage_order;
 	struct sk_buff *nskb = NULL;
+	int nsize, headroom;
 	struct page *npage;
 	void *npage_addr;
 
@@ -825,8 +825,7 @@ static struct sk_buff *a010022_realign_s
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	/* Reserve enough memory to accommodate Jumbo frames */
-	npage_order = (nsize - 1) / PAGE_SIZE;
-	npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, npage_order);
+	npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, get_order(nsize));
 	if (unlikely(!npage)) {
 		WARN_ONCE(1, "Memory allocation failure\n");
 		return NULL;
@@ -869,7 +868,6 @@ static struct sk_buff *a010022_realign_s
 	/* We don't want the buffer to be recycled so we mark it accordingly */
 	nskb->mark = NONREC_MARK;
 
-	dev_kfree_skb(skb);
 	return nskb;
 
 err:
@@ -911,8 +909,13 @@ int __hot skb_to_sg_fd(struct dpa_priv_s
 	 * is in place and we need to avoid crossing a 4k boundary.
 	 */
 #ifndef CONFIG_PPC
-	if (unlikely(dpaa_errata_a010022))
-		sgt_buf = page_address(alloc_page(GFP_ATOMIC));
+	if (unlikely(dpaa_errata_a010022)) {
+		struct page *new_page = alloc_page(GFP_ATOMIC);
+
+		if (unlikely(!new_page))
+			return -ENOMEM;
+		sgt_buf = page_address(new_page);
+	}
 	else
 #endif
 		sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
@@ -1061,6 +1064,7 @@ int __hot dpa_tx_extended(struct sk_buff
 	int err = 0;
 	bool nonlinear;
 	int *countptr, offset = 0;
+	struct sk_buff *nskb;
 
 	priv = netdev_priv(net_dev);
 	/* Non-migratable context, safe to use raw_cpu_ptr */
@@ -1072,9 +1076,11 @@ int __hot dpa_tx_extended(struct sk_buff
 
 #ifndef CONFIG_PPC
 	if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb, priv)) {
-		skb = a010022_realign_skb(skb, priv);
-		if (!skb)
+		nskb = a010022_realign_skb(skb, priv);
+		if (!nskb)
 			goto skb_to_fd_failed;
+		dev_kfree_skb(skb);
+		skb = nskb;
 	}
 #endif
 
@@ -1130,15 +1136,17 @@ int __hot dpa_tx_extended(struct sk_buff
 
 		/* Code borrowed from skb_unshare(). */
 		if (skb_cloned(skb)) {
-			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+			nskb = skb_copy(skb, GFP_ATOMIC);
 			kfree_skb(skb);
 			skb = nskb;
 #ifndef CONFIG_PPC
 			if (unlikely(dpaa_errata_a010022) &&
 			    a010022_check_skb(skb, priv)) {
-				skb = a010022_realign_skb(skb, priv);
-				if (!skb)
+				nskb = a010022_realign_skb(skb, priv);
+				if (!nskb)
 					goto skb_to_fd_failed;
+				dev_kfree_skb(skb);
+				skb = nskb;
 			}
 #endif
 			/* skb_copy() has now linearized the skbuff. */
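
The hunks above follow a common pattern: derive the allocation order with
get_order() instead of open-coding the division, check every atomic
allocation before dereferencing it, and let the caller free the old skb only
after a successful realignment so that neither a leak nor a use-after-free
can occur. The following is a minimal, self-contained sketch of that pattern,
not driver code: realign_skb(), alloc_jumbo_buf() and realign_for_tx() are
hypothetical stand-ins for the a010022_* helpers in dpaa_eth_sg.c.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Hypothetical stand-in for a010022_realign_skb(); the real helper copies
 * the data into a freshly allocated buffer that satisfies the erratum's
 * alignment constraints. */
static struct sk_buff *realign_skb(struct sk_buff *skb)
{
	return skb_copy(skb, GFP_ATOMIC);
}

/* Order computed via get_order() rather than (nsize - 1) / PAGE_SIZE, and
 * the allocation is NULL-checked before page_address() is called. */
static void *alloc_jumbo_buf(size_t nsize)
{
	struct page *npage = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					 get_order(nsize));

	if (unlikely(!npage)) {
		WARN_ONCE(1, "Memory allocation failure\n");
		return NULL;
	}
	return page_address(npage);
}

/* The caller keeps ownership of the old skb until realignment succeeds;
 * only then is it freed and replaced. */
static int realign_for_tx(struct sk_buff **skbp)
{
	struct sk_buff *nskb = realign_skb(*skbp);

	if (!nskb)
		return -ENOMEM;	/* old skb untouched, caller handles the error */
	dev_kfree_skb(*skbp);
	*skbp = nskb;
	return 0;
}

Keeping the free in the caller, rather than inside the realignment helper,
makes the ownership rule uniform across the success and failure paths, which
is why dev_kfree_skb() moves out of a010022_realign_skb() in this patch.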