76 lines
		
	
	
		
			3.2 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
			
		
		
	
	
			76 lines
		
	
	
		
			3.2 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
commit 9a5d2bd99e0dfe9a31b3c160073ac445ba3d773f
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Sun Apr 8 10:01:44 2012 +0000

    ppp: Fix race condition with queue start/stop

    Commit e675f0cc9a872fd152edc0c77acfed19bf28b81e ("ppp: Don't stop and
    restart queue on every TX packet") introduced a race condition which
    could leave the net queue stopped even when the channel is no longer
    busy. By calling netif_stop_queue() from ppp_start_xmit(), based on the
    return value from ppp_xmit_process() but *after* all the locks have been
    dropped, we could potentially do so *after* the channel has actually
    finished transmitting and attempted to re-wake the queue.

    Fix this by moving the netif_stop_queue() into ppp_xmit_process() under
    the xmit lock. I hadn't done this previously, because it gets called
    from other places than ppp_start_xmit(). But I now think it's the better
    option. The net queue *should* be stopped if the channel becomes
    congested due to writes from pppd, anyway.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

commit e675f0cc9a872fd152edc0c77acfed19bf28b81e
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Mon Mar 26 00:03:42 2012 +0000

    ppp: Don't stop and restart queue on every TX packet

    For every transmitted packet, ppp_start_xmit() will stop the netdev
    queue and then, if appropriate, restart it. This causes the TX softirq
    to run, entirely gratuitously.

    This is "only" a waste of CPU time in the normal case, but it's actively
    harmful when the PPP device is a TEQL slave — the wakeup will cause the
    offending device to receive the next TX packet from the TEQL queue, when
    it *should* have gone to the next slave in the list. We end up seeing
    large bursts of packets on just *one* slave device, rather than using
    the full available bandwidth over all slaves.

    This patch fixes the problem by *not* unconditionally stopping the queue
    in ppp_start_xmit(). It adds a return value from ppp_xmit_process()
    which indicates whether the queue should be stopped or not.

    It *doesn't* remove the call to netif_wake_queue() from
    ppp_xmit_process(), because other code paths (especially from
    ppp_output_wakeup()) need it there and it's messy to push it out to the
    other callers to do it based on the return value. So we leave it in
    place — it's a no-op in the case where the queue wasn't stopped, so it's
    harmless in the TX path.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -968,7 +968,6 @@ ppp_start_xmit(struct sk_buff *skb, stru
 	proto = npindex_to_proto[npi];
 	put_unaligned_be16(proto, pp);
 
-	netif_stop_queue(dev);
 	skb_queue_tail(&ppp->file.xq, skb);
 	ppp_xmit_process(ppp);
 	return NETDEV_TX_OK;
@@ -1063,6 +1062,8 @@ ppp_xmit_process(struct ppp *ppp)
 		   code that we can accept some more. */
 		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
 			netif_wake_queue(ppp->dev);
+		else
+			netif_stop_queue(ppp->dev);
 	}
 	ppp_xmit_unlock(ppp);
 }
