kernel: update kernel 4.4 to version 4.4.11
Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com>
@@ -4,11 +4,11 @@ LINUX_RELEASE?=1

LINUX_VERSION-3.18 = .29
LINUX_VERSION-4.1 = .20
LINUX_VERSION-4.4 = .10
LINUX_VERSION-4.4 = .11

LINUX_KERNEL_MD5SUM-3.18.29 = b25737a0bc98e80d12200de93f239c28
LINUX_KERNEL_MD5SUM-4.1.20 = 075c38a3a23ca5bc80437b13606df00a
LINUX_KERNEL_MD5SUM-4.4.10 = f7033cbe05e1359a347815ca52d051ed
LINUX_KERNEL_MD5SUM-4.4.11 = 58b2eaccb3cec0d78e46ff4e968b431a

ifdef KERNEL_PATCHVER
  LINUX_VERSION:=$(KERNEL_PATCHVER)$(strip $(LINUX_VERSION-$(KERNEL_PATCHVER)))

@@ -12,7 +12,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);

 void ath79_ddr_wb_flush(u32 reg)
 {

@@ -29,9 +29,7 @@
-	u32 bootstrap;
+	void __iomem *phy_reg;
+	u32 t;

-	bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
-	if (bootstrap & AR934X_BOOTSTRAP_USB_MODE_DEVICE)
+
+	phy_reg = ioremap(base, 4);
+	if (!phy_reg)
+		return;
@@ -43,7 +41,9 @@
+
+	iounmap(phy_reg);
+}
+

-	bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
-	if (bootstrap & AR934X_BOOTSTRAP_USB_MODE_DEVICE)
+static void ar934x_usb_reset_notifier(struct platform_device *pdev)
+{
+	if (pdev->id != -1)

@@ -155,7 +155,7 @@
+#define AR934X_RESET_LUT		BIT(2)
+#define AR934X_RESET_MBOX		BIT(1)
+#define AR934X_RESET_I2S		BIT(0)

+
+#define QCA955X_RESET_HOST		BIT(31)
+#define QCA955X_RESET_SLIC		BIT(30)
+#define QCA955X_RESET_HDMA		BIT(29)
@@ -188,7 +188,7 @@
+#define QCA955X_RESET_LUT		BIT(2)
+#define QCA955X_RESET_MBOX		BIT(1)
+#define QCA955X_RESET_I2S		BIT(0)
+

+#define AR933X_BOOTSTRAP_MDIO_GPIO_EN	BIT(18)
+#define AR933X_BOOTSTRAP_EEPBUSY	BIT(4)
 #define AR933X_BOOTSTRAP_REF_CLK_40	BIT(0)

@@ -135,8 +135,7 @@
+static void __init ap136_common_setup(void)
+{
+	u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);

-static int ap136_pci_plat_dev_init(struct pci_dev *dev)
+
+	ath79_register_m25p80(NULL);
+
+	ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio),
@@ -151,7 +150,8 @@
+	ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET, NULL);
+
+	ath79_setup_qca955x_eth_cfg(QCA955X_ETH_CFG_RGMII_EN);
+

-static int ap136_pci_plat_dev_init(struct pci_dev *dev)
+	ath79_register_mdio(0, 0x0);
+	ath79_init_mac(ath79_eth0_data.mac_addr, art + AP136_MAC0_OFFSET, 0);
+
@@ -211,16 +211,16 @@
+	/* GMAC0 of the AR8327 switch is connected to GMAC1 via SGMII */
+	ap136_ar8327_pad0_cfg.mode = AR8327_PAD_MAC_SGMII;
+	ap136_ar8327_pad0_cfg.sgmii_delay_en = true;

-	ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init);
-	ath79_register_pci();
+
+	/* GMAC6 of the AR8327 switch is connected to GMAC0 via RGMII */
+	ap136_ar8327_pad6_cfg.mode = AR8327_PAD_MAC_RGMII;
+	ap136_ar8327_pad6_cfg.txclk_delay_en = true;
+	ap136_ar8327_pad6_cfg.rxclk_delay_en = true;
+	ap136_ar8327_pad6_cfg.txclk_delay_sel = AR8327_CLK_DELAY_SEL1;
+	ap136_ar8327_pad6_cfg.rxclk_delay_sel = AR8327_CLK_DELAY_SEL2;
+

-	ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init);
-	ath79_register_pci();
+	ath79_eth0_pll_data.pll_1000 = 0x56000000;
+	ath79_eth1_pll_data.pll_1000 = 0x03000101;
+

@@ -117,7 +117,7 @@ Subject: [PATCH 028/304] squash: include ARCH_BCM2708 / ARCH_BCM2709
 	  This selects a driver for the Broadcom BCM2835 SPI master.
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1291,7 +1291,7 @@ config BCM63XX_WDT
@@ -1282,7 +1282,7 @@ config BCM63XX_WDT

 config BCM2835_WDT
 	tristate "Broadcom BCM2835 hardware watchdog"

@@ -16,7 +16,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -408,6 +408,15 @@ bool tcf_destroy(struct tcf_proto *tp, b
@@ -409,6 +409,15 @@ bool tcf_destroy(struct tcf_proto *tp, b
 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 int skb_do_redirect(struct sk_buff *);


@@ -1,237 +0,0 @@
From: WANG Cong <xiyou.wangcong@gmail.com>
Date: Thu, 25 Feb 2016 14:55:00 -0800
Subject: [PATCH] net_sched: introduce qdisc_replace() helper

Remove nearly duplicated code and prepare for the following patch.

Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -698,6 +698,23 @@ static inline void qdisc_reset_queue(str
 	sch->qstats.backlog = 0;
 }

+static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+					  struct Qdisc **pold)
+{
+	struct Qdisc *old;
+
+	sch_tree_lock(sch);
+	old = *pold;
+	*pold = new;
+	if (old != NULL) {
+		qdisc_tree_decrease_qlen(old, old->q.qlen);
+		qdisc_reset(old);
+	}
+	sch_tree_unlock(sch);
+
+	return old;
+}
+
 static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
 					      struct sk_buff_head *list)
 {
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch,
 			new->reshape_fail = cbq_reshape_fail;
 #endif
 	}
-	sch_tree_lock(sch);
-	*old = cl->q;
-	cl->q = new;
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);

+	*old = qdisc_replace(sch, new, &cl->q);
 	return 0;
 }

--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -226,11 +226,7 @@ static int drr_graft_class(struct Qdisc
 			new = &noop_qdisc;
 	}

-	sch_tree_lock(sch);
-	drr_purge_queue(cl);
-	*old = cl->qdisc;
-	cl->qdisc = new;
-	sch_tree_unlock(sch);
+	*old = qdisc_replace(sch, new, &cl->qdisc);
 	return 0;
 }

--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sc
 			new = &noop_qdisc;
 	}

-	sch_tree_lock(sch);
-	*old = p->q;
-	p->q = new;
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-
+	*old = qdisc_replace(sch, new, &p->q);
 	return 0;
 }

--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1215,11 +1215,7 @@ hfsc_graft_class(struct Qdisc *sch, unsi
 			new = &noop_qdisc;
 	}

-	sch_tree_lock(sch);
-	hfsc_purge_queue(sch, cl);
-	*old = cl->qdisc;
-	cl->qdisc = new;
-	sch_tree_unlock(sch);
+	*old = qdisc_replace(sch, new, &cl->qdisc);
 	return 0;
 }

--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1163,14 +1163,7 @@ static int htb_graft(struct Qdisc *sch,
 				     cl->common.classid)) == NULL)
 		return -ENOBUFS;

-	sch_tree_lock(sch);
-	*old = cl->un.leaf.q;
-	cl->un.leaf.q = new;
-	if (*old != NULL) {
-		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-		qdisc_reset(*old);
-	}
-	sch_tree_unlock(sch);
+	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
 	return 0;
 }

--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -303,13 +303,7 @@ static int multiq_graft(struct Qdisc *sc
 	if (new == NULL)
 		new = &noop_qdisc;

-	sch_tree_lock(sch);
-	*old = q->queues[band];
-	q->queues[band] = new;
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-
+	*old = qdisc_replace(sch, new, &q->queues[band]);
 	return 0;
 }

--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -1037,15 +1037,7 @@ static int netem_graft(struct Qdisc *sch
 {
 	struct netem_sched_data *q = qdisc_priv(sch);

-	sch_tree_lock(sch);
-	*old = q->qdisc;
-	q->qdisc = new;
-	if (*old) {
-		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-		qdisc_reset(*old);
-	}
-	sch_tree_unlock(sch);
-
+	*old = qdisc_replace(sch, new, &q->qdisc);
 	return 0;
 }

--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -268,13 +268,7 @@ static int prio_graft(struct Qdisc *sch,
 	if (new == NULL)
 		new = &noop_qdisc;

-	sch_tree_lock(sch);
-	*old = q->queues[band];
-	q->queues[band] = new;
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-
+	*old = qdisc_replace(sch, new, &q->queues[band]);
 	return 0;
 }

--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -617,11 +617,7 @@ static int qfq_graft_class(struct Qdisc
 			new = &noop_qdisc;
 	}

-	sch_tree_lock(sch);
-	qfq_purge_queue(cl);
-	*old = cl->qdisc;
-	cl->qdisc = new;
-	sch_tree_unlock(sch);
+	*old = qdisc_replace(sch, new, &cl->qdisc);
 	return 0;
 }

--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -313,12 +313,7 @@ static int red_graft(struct Qdisc *sch,
 	if (new == NULL)
 		new = &noop_qdisc;

-	sch_tree_lock(sch);
-	*old = q->qdisc;
-	q->qdisc = new;
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
+	*old = qdisc_replace(sch, new, &q->qdisc);
 	return 0;
 }

--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -606,12 +606,7 @@ static int sfb_graft(struct Qdisc *sch,
 	if (new == NULL)
 		new = &noop_qdisc;

-	sch_tree_lock(sch);
-	*old = q->qdisc;
-	q->qdisc = new;
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
+	*old = qdisc_replace(sch, new, &q->qdisc);
 	return 0;
 }

--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -502,13 +502,7 @@ static int tbf_graft(struct Qdisc *sch,
 	if (new == NULL)
 		new = &noop_qdisc;

-	sch_tree_lock(sch);
-	*old = q->qdisc;
-	q->qdisc = new;
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-
+	*old = qdisc_replace(sch, new, &q->qdisc);
 	return 0;
 }

@@ -1,647 +0,0 @@
From: WANG Cong <xiyou.wangcong@gmail.com>
Date: Thu, 25 Feb 2016 14:55:01 -0800
Subject: [PATCH] net_sched: update hierarchical backlog too

When the bottom qdisc decides to, for example, drop some packet,
it calls qdisc_tree_decrease_qlen() to update the queue length
for all its ancestors, we need to update the backlog too to
keep the stats on root qdisc accurate.

Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -162,12 +162,14 @@ struct codel_vars {
  * struct codel_stats - contains codel shared variables and stats
  * @maxpacket:	largest packet we've seen so far
  * @drop_count:	temp count of dropped packets in dequeue()
+ * @drop_len:	bytes of dropped packets in dequeue()
  * ecn_mark:	number of packets we ECN marked instead of dropping
  * ce_mark:	number of packets CE marked because sojourn time was above ce_threshold
  */
 struct codel_stats {
 	u32		maxpacket;
 	u32		drop_count;
+	u32		drop_len;
 	u32		ecn_mark;
 	u32		ce_mark;
 };
@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(str
 								  vars->rec_inv_sqrt);
 					goto end;
 				}
+				stats->drop_len += qdisc_pkt_len(skb);
 				qdisc_drop(skb, sch);
 				stats->drop_count++;
 				skb = dequeue_func(vars, sch);
@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(str
 		if (params->ecn && INET_ECN_set_ce(skb)) {
 			stats->ecn_mark++;
 		} else {
+			stats->drop_len += qdisc_pkt_len(skb);
 			qdisc_drop(skb, sch);
 			stats->drop_count++;

--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct net
 			      struct Qdisc *qdisc);
 void qdisc_reset(struct Qdisc *qdisc);
 void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+			       unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -707,7 +708,7 @@ static inline struct Qdisc *qdisc_replac
 	old = *pold;
 	*pold = new;
 	if (old != NULL) {
-		qdisc_tree_decrease_qlen(old, old->q.qlen);
+		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
 		qdisc_reset(old);
 	}
 	sch_tree_unlock(sch);
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net
 	return 0;
 }

-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
+			       unsigned int len)
 {
 	const struct Qdisc_class_ops *cops;
 	unsigned long cl;
 	u32 parentid;
 	int drops;

-	if (n == 0)
+	if (n == 0 && len == 0)
 		return;
 	drops = max_t(int, n, 0);
 	rcu_read_lock();
@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdi
 			cops->put(sch, cl);
 		}
 		sch->q.qlen -= n;
+		sch->qstats.backlog -= len;
 		__qdisc_qstats_drop(sch, drops);
 	}
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
+EXPORT_SYMBOL(qdisc_tree_reduce_backlog);

 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 			       struct nlmsghdr *n, u32 clid,
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1909,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch,
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
-	unsigned int qlen;
+	unsigned int qlen, backlog;

 	if (cl->filters || cl->children || cl == &q->link)
 		return -EBUSY;
@@ -1917,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch,
 	sch_tree_lock(sch);

 	qlen = cl->q->q.qlen;
+	backlog = cl->q->qstats.backlog;
 	qdisc_reset(cl->q);
-	qdisc_tree_decrease_qlen(cl->q, qlen);
+	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

 	if (cl->next_alive)
 		cbq_deactivate_class(cl);
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdi
 		choke_zap_tail_holes(q);

 	qdisc_qstats_backlog_dec(sch, skb);
+	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
 	qdisc_drop(skb, sch);
-	qdisc_tree_decrease_qlen(sch, 1);
 	--sch->q.qlen;
 }

@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sc
 		old = q->tab;
 		if (old) {
 			unsigned int oqlen = sch->q.qlen, tail = 0;
+			unsigned dropped = 0;

 			while (q->head != q->tail) {
 				struct sk_buff *skb = q->tab[q->head];
@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sc
 					ntab[tail++] = skb;
 					continue;
 				}
+				dropped += qdisc_pkt_len(skb);
 				qdisc_qstats_backlog_dec(sch, skb);
 				--sch->q.qlen;
 				qdisc_drop(skb, sch);
 			}
-			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
 			q->head = 0;
 			q->tail = tail;
 		}
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_deque

 	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);

-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
 	 * or HTB crashes. Defer it for next round.
 	 */
 	if (q->stats.drop_count && sch->q.qlen) {
-		qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
 		q->stats.drop_count = 0;
+		q->stats.drop_len = 0;
 	}
 	if (skb)
 		qdisc_bstats_update(sch, skb);
@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sc
 {
 	struct codel_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CODEL_MAX + 1];
-	unsigned int qlen;
+	unsigned int qlen, dropped = 0;
 	int err;

 	if (!opt)
@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sc
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = __skb_dequeue(&sch->q);

+		dropped += qdisc_pkt_len(skb);
 		qdisc_qstats_backlog_dec(sch, skb);
 		qdisc_drop(skb, sch);
 	}
-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

 	sch_tree_unlock(sch);
 	return 0;
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(
 static void drr_purge_queue(struct drr_class *cl)
 {
 	unsigned int len = cl->qdisc->q.qlen;
+	unsigned int backlog = cl->qdisc->qstats.backlog;

 	qdisc_reset(cl->qdisc);
-	qdisc_tree_decrease_qlen(cl->qdisc, len);
+	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }

 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch,
 	struct fq_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_FQ_MAX + 1];
 	int err, drop_count = 0;
+	unsigned drop_len = 0;
 	u32 fq_log;

 	if (!opt)
@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch,

 		if (!skb)
 			break;
+		drop_len += qdisc_pkt_len(skb);
 		kfree_skb(skb);
 		drop_count++;
 	}
-	qdisc_tree_decrease_qlen(sch, drop_count);
+	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

 	sch_tree_unlock(sch);
 	return err;
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(
 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
-	unsigned int idx;
+	unsigned int idx, prev_backlog;
 	struct fq_codel_flow *flow;
 	int uninitialized_var(ret);

@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_bu
 	if (++sch->q.qlen <= sch->limit)
 		return NET_XMIT_SUCCESS;

+	prev_backlog = sch->qstats.backlog;
 	q->drop_overlimit++;
 	/* Return Congestion Notification only if we dropped a packet
 	 * from this flow.
@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_bu
 		return NET_XMIT_CN;

 	/* As we dropped a packet, better let upper stack know this */
-	qdisc_tree_decrease_qlen(sch, 1);
+	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
 	return NET_XMIT_SUCCESS;
 }

@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(
 	struct fq_codel_flow *flow;
 	struct list_head *head;
 	u32 prev_drop_count, prev_ecn_mark;
+	unsigned int prev_backlog;

 begin:
 	head = &q->new_flows;
@@ -259,6 +261,7 @@ begin:

 	prev_drop_count = q->cstats.drop_count;
 	prev_ecn_mark = q->cstats.ecn_mark;
+	prev_backlog = sch->qstats.backlog;

 	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
 			    dequeue);
@@ -276,12 +279,14 @@ begin:
 	}
 	qdisc_bstats_update(sch, skb);
 	flow->deficit -= qdisc_pkt_len(skb);
-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
 	 * or HTB crashes. Defer it for next round.
 	 */
 	if (q->cstats.drop_count && sch->q.qlen) {
-		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+					  q->cstats.drop_len);
 		q->cstats.drop_count = 0;
+		q->cstats.drop_len = 0;
 	}
 	return skb;
 }
@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = fq_codel_dequeue(sch);

+		q->cstats.drop_len += qdisc_pkt_len(skb);
 		kfree_skb(skb);
 		q->cstats.drop_count++;
 	}
-	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
 	q->cstats.drop_count = 0;
+	q->cstats.drop_len = 0;

 	sch_tree_unlock(sch);
 	return 0;
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -895,9 +895,10 @@ static void
 hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
 {
 	unsigned int len = cl->qdisc->q.qlen;
+	unsigned int backlog = cl->qdisc->qstats.backlog;

 	qdisc_reset(cl->qdisc);
-	qdisc_tree_decrease_qlen(cl->qdisc, len);
+	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }

 static void
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *s
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	enum wdrr_bucket_idx idx;
 	struct wdrr_bucket *bucket;
+	unsigned int prev_backlog;

 	idx = hhf_classify(skb, sch);

@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *s
 	if (++sch->q.qlen <= sch->limit)
 		return NET_XMIT_SUCCESS;

+	prev_backlog = sch->qstats.backlog;
 	q->drop_overlimit++;
 	/* Return Congestion Notification only if we dropped a packet from this
 	 * bucket.
@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *s
 		return NET_XMIT_CN;

 	/* As we dropped a packet, better let upper stack know this. */
-	qdisc_tree_decrease_qlen(sch, 1);
+	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
 	return NET_XMIT_SUCCESS;
 }

@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch,
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_HHF_MAX + 1];
-	unsigned int qlen;
+	unsigned int qlen, prev_backlog;
 	int err;
 	u64 non_hh_quantum;
 	u32 new_quantum = q->quantum;
@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch,
 	}

 	qlen = sch->q.qlen;
+	prev_backlog = sch->qstats.backlog;
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = hhf_dequeue(sch);

 		kfree_skb(skb);
 	}
-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
+				  prev_backlog - sch->qstats.backlog);

 	sch_tree_unlock(sch);
 	return 0;
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1265,7 +1265,6 @@ static int htb_delete(struct Qdisc *sch,
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)arg;
-	unsigned int qlen;
 	struct Qdisc *new_q = NULL;
 	int last_child = 0;

@@ -1285,9 +1284,11 @@ static int htb_delete(struct Qdisc *sch,
 	sch_tree_lock(sch);

 	if (!cl->level) {
-		qlen = cl->un.leaf.q->q.qlen;
+		unsigned int qlen = cl->un.leaf.q->q.qlen;
+		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+
 		qdisc_reset(cl->un.leaf.q);
-		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
 	}

 	/* delete from hash and active; remainder in destroy_class */
@@ -1421,10 +1422,11 @@ static int htb_change_class(struct Qdisc
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
 			unsigned int qlen = parent->un.leaf.q->q.qlen;
+			unsigned int backlog = parent->un.leaf.q->qstats.backlog;

 			/* turn parent into inner node */
 			qdisc_reset(parent->un.leaf.q);
-			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
+			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
 			qdisc_destroy(parent->un.leaf.q);
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch
 		if (q->queues[i] != &noop_qdisc) {
 			struct Qdisc *child = q->queues[i];
 			q->queues[i] = &noop_qdisc;
-			qdisc_tree_decrease_qlen(child, child->q.qlen);
+			qdisc_tree_reduce_backlog(child, child->q.qlen,
+						  child->qstats.backlog);
 			qdisc_destroy(child);
 		}
 	}
@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch
 				q->queues[i] = child;

 				if (old != &noop_qdisc) {
-					qdisc_tree_decrease_qlen(old,
-								 old->q.qlen);
+					qdisc_tree_reduce_backlog(old,
+								  old->q.qlen,
+								  old->qstats.backlog);
 					qdisc_destroy(old);
 				}
 				sch_tree_unlock(sch);
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -598,7 +598,8 @@ deliver:
 				if (unlikely(err != NET_XMIT_SUCCESS)) {
 					if (net_xmit_drop_count(err)) {
 						qdisc_qstats_drop(sch);
-						qdisc_tree_decrease_qlen(sch, 1);
+						qdisc_tree_reduce_backlog(sch, 1,
+									  qdisc_pkt_len(skb));
 					}
 				}
 				goto tfifo_dequeue;
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch,
 {
 	struct pie_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_PIE_MAX + 1];
-	unsigned int qlen;
+	unsigned int qlen, dropped = 0;
 	int err;

 	if (!opt)
@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch,
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = __skb_dequeue(&sch->q);

+		dropped += qdisc_pkt_len(skb);
 		qdisc_qstats_backlog_dec(sch, skb);
 		qdisc_drop(skb, sch);
 	}
-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

 	sch_tree_unlock(sch);
 	return 0;
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch,
 		struct Qdisc *child = q->queues[i];
 		q->queues[i] = &noop_qdisc;
 		if (child != &noop_qdisc) {
-			qdisc_tree_decrease_qlen(child, child->q.qlen);
+			qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
 			qdisc_destroy(child);
 		}
 	}
@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch,
 				q->queues[i] = child;

 				if (old != &noop_qdisc) {
-					qdisc_tree_decrease_qlen(old,
-								 old->q.qlen);
+					qdisc_tree_reduce_backlog(old,
+								  old->q.qlen,
+								  old->qstats.backlog);
 					qdisc_destroy(old);
 				}
 				sch_tree_unlock(sch);
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(
 static void qfq_purge_queue(struct qfq_class *cl)
 {
 	unsigned int len = cl->qdisc->q.qlen;
+	unsigned int backlog = cl->qdisc->qstats.backlog;

 	qdisc_reset(cl->qdisc);
-	qdisc_tree_decrease_qlen(cl->qdisc, len);
+	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }

 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch,
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
 	if (child) {
-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+					  q->qdisc->qstats.backlog);
 		qdisc_destroy(q->qdisc);
 		q->qdisc = child;
 	}
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch,

 	sch_tree_lock(sch);

-	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+				  q->qdisc->qstats.backlog);
 	qdisc_destroy(q->qdisc);
 	q->qdisc = child;

--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -346,7 +346,7 @@ static int
 sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
-	unsigned int hash;
+	unsigned int hash, dropped;
 	sfq_index x, qlen;
 	struct sfq_slot *slot;
 	int uninitialized_var(ret);
@@ -461,7 +461,7 @@ enqueue:
 		return NET_XMIT_SUCCESS;

 	qlen = slot->qlen;
-	sfq_drop(sch);
+	dropped = sfq_drop(sch);
 	/* Return Congestion Notification only if we dropped a packet
 	 * from this flow.
 	 */
@@ -469,7 +469,7 @@ enqueue:
 		return NET_XMIT_CN;

 	/* As we dropped a packet, better let upper stack know this */
-	qdisc_tree_decrease_qlen(sch, 1);
+	qdisc_tree_reduce_backlog(sch, 1, dropped);
 	return NET_XMIT_SUCCESS;
 }

@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch
 	struct sfq_slot *slot;
 	struct sk_buff_head list;
 	int dropped = 0;
+	unsigned int drop_len = 0;

 	__skb_queue_head_init(&list);

@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch
 			if (x >= SFQ_MAX_FLOWS) {
 drop:
 				qdisc_qstats_backlog_dec(sch, skb);
+				drop_len += qdisc_pkt_len(skb);
 				kfree_skb(skb);
 				dropped++;
 				continue;
@@ -594,7 +596,7 @@ drop:
 		}
 	}
 	sch->q.qlen -= dropped;
-	qdisc_tree_decrease_qlen(sch, dropped);
+	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
 }

 static void sfq_perturbation(unsigned long arg)
@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch,
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	struct tc_sfq_qopt *ctl = nla_data(opt);
 	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
-	unsigned int qlen;
+	unsigned int qlen, dropped = 0;
 	struct red_parms *p = NULL;

 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch,

 	qlen = sch->q.qlen;
 	while (sch->q.qlen > q->limit)
-		sfq_drop(sch);
-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+		dropped += sfq_drop(sch);
+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

 	del_timer(&q->perturb_timer);
 	if (q->perturb_period) {
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *s
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *segs, *nskb;
 	netdev_features_t features = netif_skb_features(skb);
+	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
 	int ret, nb;

 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *s
 		nskb = segs->next;
 		segs->next = NULL;
 		qdisc_skb_cb(segs)->pkt_len = segs->len;
+		len += segs->len;
 		ret = qdisc_enqueue(segs, q->qdisc);
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *s
 	}
 	sch->q.qlen += nb;
 	if (nb > 1)
-		qdisc_tree_decrease_qlen(sch, 1 - nb);
+		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
 	consume_skb(skb);
 	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
 }
@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch,

 	sch_tree_lock(sch);
 	if (child) {
-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+					  q->qdisc->qstats.backlog);
 		qdisc_destroy(q->qdisc);
 		q->qdisc = child;
 	}
@@ -138,12 +138,12 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
-	if (fq_codel_drop(sch) == idx)
-		return NET_XMIT_CN;
+	ret = fq_codel_drop(sch, q->drop_batch_size);
+
+	q->drop_overlimit += prev_qlen - sch->q.qlen;

-	/* As we dropped a packet, better let upper stack know this */
-	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
-	return NET_XMIT_SUCCESS;
+	q->drop_overlimit += prev_qlen - sch->q.qlen;
+
+	/* As we dropped packet(s), better let upper stack know this */
+	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
+				  prev_backlog - sch->qstats.backlog);

@@ -26,6 +26,19 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+/* We have included libc headers... */
+#if !defined(__KERNEL__)

-/* Coordinate with glibc net/if.h header. */
+/* Coordinate with libc net/if.h header. */
 #if defined(_NET_IF_H)

-/* GLIBC headers included first so don't define anything
+/* LIBC headers included first so don't define anything
  * that would already be defined. */

 #define __UAPI_DEF_IF_IFCONF 0
@@ -85,10 +85,10 @@

 #endif /* _NET_IF_H */

-/* Coordinate with glibc netinet/in.h header. */
+/* Coordinate with libc netinet/in.h header. */
 #if defined(_NETINET_IN_H)
@@ -35,7 +48,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
  * that would already be defined. */
 #define __UAPI_DEF_IN_ADDR		0
 #define __UAPI_DEF_IN_IPPROTO		0
@@ -68,7 +68,7 @@
@@ -102,7 +102,7 @@
  * if the glibc code didn't define them. This guard matches
  * the guard in glibc/inet/netinet/in.h which defines the
  * additional in6_addr macros e.g. s6_addr16, and s6_addr32. */
@@ -44,7 +57,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 #define __UAPI_DEF_IN6_ADDR_ALT		0
 #else
 #define __UAPI_DEF_IN6_ADDR_ALT		1
@@ -83,7 +83,7 @@
@@ -117,7 +117,7 @@
 #else

 /* Linux headers included first, and we must define everything
@@ -53,7 +66,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
  * __UAPI_DEF_* defines and adjust appropriately. */
 #define __UAPI_DEF_IN_ADDR		1
 #define __UAPI_DEF_IN_IPPROTO		1
@@ -93,7 +93,7 @@
@@ -127,7 +127,7 @@
 #define __UAPI_DEF_IN_CLASS		1

 #define __UAPI_DEF_IN6_ADDR		1
@@ -62,16 +75,16 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
  * coordinate. */
 #define __UAPI_DEF_IN6_ADDR_ALT		1
 #define __UAPI_DEF_SOCKADDR_IN6		1
@@ -115,7 +115,7 @@
@@ -149,7 +149,7 @@
 /* If we did not see any headers from any supported C libraries,
  * or we are being included in the kernel, then define everything
  * that we need. */
-#else /* !defined(__GLIBC__) */
+#else /* defined(__KERNEL__) */

 /* Definitions for in.h */
 #define __UAPI_DEF_IN_ADDR		1
@@ -138,6 +138,6 @@
 /* Definitions for if.h */
 #define __UAPI_DEF_IF_IFCONF 1
@@ -182,6 +182,6 @@
 /* Definitions for xattr.h */
 #define __UAPI_DEF_XATTR		1


@@ -40,9 +40,9 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 #endif /* _UAPI_LINUX_IF_ETHER_H */
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -51,6 +51,14 @@
 /* We have included libc headers... */
 #if !defined(__KERNEL__)
@@ -85,6 +85,14 @@

 #endif /* _NET_IF_H */

+/* musl defines the ethhdr struct itself in its netinet/if_ether.h.
+ * Glibc just includes the kernel header and uses a different guard. */
@@ -55,9 +55,9 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 /* Coordinate with libc netinet/in.h header. */
 #if defined(_NETINET_IN_H)

@@ -117,6 +125,9 @@
  * that we need. */
 #else /* defined(__KERNEL__) */
@@ -161,6 +169,9 @@
 /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1

+/* Definitions for if_ether.h */
+#define __UAPI_DEF_ETHHDR 		1

@@ -284,15 +284,15 @@ Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com>
+	EXPORT(kexec_argv_buf)
+	.skip		KEXEC_COMMAND_LINE_SIZE
+	.size		kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE
+
+kexec_argv:
+	EXPORT(kexec_argv)
+	.skip		KEXEC_ARGV_SIZE
+	.size		kexec_argv, KEXEC_ARGV_SIZE

-relocate_new_kernel_size:
-	EXPORT(relocate_new_kernel_size)
-	PTR		relocate_new_kernel_end - relocate_new_kernel
-	.size		relocate_new_kernel_size, PTRSIZE
+kexec_argv:
+	EXPORT(kexec_argv)
+	.skip		KEXEC_ARGV_SIZE
+	.size		kexec_argv, KEXEC_ARGV_SIZE
+
+kexec_relocate_new_kernel_end:
+	EXPORT(kexec_relocate_new_kernel_end)

@@ -219,26 +219,26 @@
 {
   UInt32 dicSize;
   Byte d;
@@ -935,33 +883,11 @@ static SRes LzmaDec_AllocateProbs2(CLzma
@@ -935,7 +883,7 @@ static SRes LzmaDec_AllocateProbs2(CLzma
   return SZ_OK;
 }

-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
-{
-  CLzmaProps propNew;
-  RINOK(LzmaProps_Decode(&propNew, props, propsSize));
-  RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
-  p->prop = propNew;
-  return SZ_OK;
-}
-
-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+static SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
 {
   CLzmaProps propNew;
-  SizeT dicBufSize;
   RINOK(LzmaProps_Decode(&propNew, props, propsSize));
   RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
@@ -943,28 +891,6 @@ SRes LzmaDec_AllocateProbs(CLzmaDec *p,
   p->prop = propNew;
   return SZ_OK;
 }
-
-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
-{
-  CLzmaProps propNew;
-  SizeT dicBufSize;
-  RINOK(LzmaProps_Decode(&propNew, props, propsSize));
-  RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
-  dicBufSize = propNew.dicSize;
-  if (p->dic == 0 || dicBufSize != p->dicBufSize)
-  {
@@ -251,9 +251,12 @@
-    }
-  }
-  p->dicBufSize = dicBufSize;
   p->prop = propNew;
   return SZ_OK;
 }
-  p->prop = propNew;
-  return SZ_OK;
-}

 SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
     const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
--- a/include/linux/lzma/LzmaEnc.h
+++ b/include/linux/lzma/LzmaEnc.h
@@ -31,9 +31,6 @@ typedef struct _CLzmaEncProps

@@ -91,7 +91,7 @@ Signed-off-by: Felix Fietkau <nbd@openwrt.org>

 	if (sock->type == SOCK_PACKET)
 		po->prot_hook.func = packet_rcv_spkt;
@@ -3707,6 +3710,16 @@ packet_setsockopt(struct socket *sock, i
@@ -3708,6 +3711,16 @@ packet_setsockopt(struct socket *sock, i
 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
 		return 0;
 	}
@@ -108,7 +108,7 @@ Signed-off-by: Felix Fietkau <nbd@openwrt.org>
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -3759,6 +3772,13 @@ static int packet_getsockopt(struct sock
@@ -3760,6 +3773,13 @@ static int packet_getsockopt(struct sock
 	case PACKET_VNET_HDR:
 		val = po->has_vnet_hdr;
 		break;

@@ -172,7 +172,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 			if (!err)
 				break;
 		}
@@ -1422,7 +1448,8 @@ br_multicast_leave_group(struct net_brid
@@ -1424,7 +1450,8 @@ br_multicast_leave_group(struct net_brid
 			 struct net_bridge_port *port,
 			 struct br_ip *group,
 			 struct bridge_mcast_other_query *other_query,
@@ -182,7 +182,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -1446,7 +1473,7 @@ br_multicast_leave_group(struct net_brid
@@ -1448,7 +1475,7 @@ br_multicast_leave_group(struct net_brid
 		for (pp = &mp->ports;
 		     (p = mlock_dereference(*pp, br)) != NULL;
 		     pp = &p->next) {
@@ -191,7 +191,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 				continue;

 			rcu_assign_pointer(*pp, p->next);
@@ -1509,7 +1536,7 @@ br_multicast_leave_group(struct net_brid
@@ -1511,7 +1538,7 @@ br_multicast_leave_group(struct net_brid
 	for (p = mlock_dereference(mp->ports, br);
 	     p != NULL;
 	     p = mlock_dereference(p->next, br)) {
@@ -200,7 +200,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 			continue;

 		if (!hlist_unhashed(&p->mglist) &&
@@ -1527,8 +1554,8 @@ out:
@@ -1529,8 +1556,8 @@ out:

 static void br_ip4_multicast_leave_group(struct net_bridge *br,
 					 struct net_bridge_port *port,
@@ -211,7 +211,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 {
 	struct br_ip br_group;
 	struct bridge_mcast_own_query *own_query;
@@ -1543,14 +1570,14 @@ static void br_ip4_multicast_leave_group
@@ -1545,14 +1572,14 @@ static void br_ip4_multicast_leave_group
 	br_group.vid = vid;

 	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
@@ -228,7 +228,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 {
 	struct br_ip br_group;
 	struct bridge_mcast_own_query *own_query;
@@ -1565,7 +1592,7 @@ static void br_ip6_multicast_leave_group
@@ -1567,7 +1594,7 @@ static void br_ip6_multicast_leave_group
 	br_group.vid = vid;

 	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
@@ -237,7 +237,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 }
 #endif

@@ -1574,6 +1601,7 @@ static int br_multicast_ipv4_rcv(struct
@@ -1576,6 +1603,7 @@ static int br_multicast_ipv4_rcv(struct
 				 struct sk_buff *skb,
 				 u16 vid)
 {
@@ -245,7 +245,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 	struct sk_buff *skb_trimmed = NULL;
 	struct igmphdr *ih;
 	int err;
@@ -1590,12 +1618,13 @@ static int br_multicast_ipv4_rcv(struct
@@ -1592,12 +1620,13 @@ static int br_multicast_ipv4_rcv(struct

 	BR_INPUT_SKB_CB(skb)->igmp = 1;
 	ih = igmp_hdr(skb);
@@ -260,7 +260,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 		break;
 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
 		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
@@ -1604,7 +1633,7 @@ static int br_multicast_ipv4_rcv(struct
@@ -1606,7 +1635,7 @@ static int br_multicast_ipv4_rcv(struct
 | 
			
		||||
 		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
 | 
			
		||||
 		break;
 | 
			
		||||
 	case IGMP_HOST_LEAVE_MESSAGE:
 | 
			
		||||
@@ -269,7 +269,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 | 
			
		||||
 		break;
 | 
			
		||||
 	}
 | 
			
		||||
 
 | 
			
		||||
@@ -1620,6 +1649,7 @@ static int br_multicast_ipv6_rcv(struct
@@ -1622,6 +1651,7 @@ static int br_multicast_ipv6_rcv(struct
 				 struct sk_buff *skb,
 				 u16 vid)
 {
@@ -277,7 +277,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 	struct sk_buff *skb_trimmed = NULL;
 	struct mld_msg *mld;
 	int err;
@@ -1639,8 +1669,9 @@ static int br_multicast_ipv6_rcv(struct
@@ -1641,8 +1671,9 @@ static int br_multicast_ipv6_rcv(struct
 
 	switch (mld->mld_type) {
 	case ICMPV6_MGM_REPORT:
@@ -288,7 +288,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
 		break;
 	case ICMPV6_MLD2_REPORT:
 		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
@@ -1649,7 +1680,8 @@ static int br_multicast_ipv6_rcv(struct
@@ -1651,7 +1682,8 @@ static int br_multicast_ipv6_rcv(struct
 		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
 		break;
 	case ICMPV6_MGM_REDUCTION:
@@ -382,6 +382,8 @@ Implement optinal multicast->unicast conversion for igmp snooping
 
-		port = (unsigned long)lport > (unsigned long)rport ?
-		       lport : rport;
-
-		prev = maybe_deliver(prev, port, skb, __packet_hook);
+		if ((unsigned long)lport > (unsigned long)rport) {
+			port = lport;
+			addr = p->unicast ? p->eth_addr : NULL;
@@ -389,8 +391,7 @@ Implement optinal multicast->unicast conversion for igmp snooping
+			port = rport;
+			addr = NULL;
+		}
 
-		prev = maybe_deliver(prev, port, skb, __packet_hook);
+
+		if (addr)
+			prev = maybe_deliver_addr(prev, port, skb, addr,
+						  __packet_hook);
 
@@ -51,7 +51,7 @@
 EXPORT_SYMBOL(default_qdisc_ops);
 
 /* Main transmission queue. */
@@ -728,7 +728,7 @@ static void attach_one_default_qdisc(str
@@ -731,7 +731,7 @@ static void attach_one_default_qdisc(str
 				     void *_unused)
 {
 	struct Qdisc *qdisc;
 
@@ -1,6 +1,6 @@
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -435,139 +435,6 @@ struct Qdisc_ops noqueue_qdisc_ops __rea
@@ -438,139 +438,6 @@ struct Qdisc_ops noqueue_qdisc_ops __rea
 	.owner		=	THIS_MODULE,
 };
 
 
@@ -143,7 +143,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 static const struct rt6_info ip6_blk_hole_entry_template = {
 	.dst = {
 		.__refcnt	= ATOMIC_INIT(1),
@@ -1883,6 +1900,11 @@ static struct rt6_info *ip6_route_info_c
@@ -1885,6 +1902,11 @@ static struct rt6_info *ip6_route_info_c
 			rt->dst.output = ip6_pkt_prohibit_out;
 			rt->dst.input = ip6_pkt_prohibit;
 			break;
@@ -155,7 +155,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 		case RTN_THROW:
 		case RTN_UNREACHABLE:
 		default:
@@ -2484,6 +2506,17 @@ static int ip6_pkt_prohibit_out(struct n
@@ -2486,6 +2508,17 @@ static int ip6_pkt_prohibit_out(struct n
 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
 }
 
@@ -173,7 +173,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 /*
  *	Allocate a dst for local (unicast / anycast) address.
  */
@@ -2726,7 +2759,8 @@ static int rtm_to_fib6_config(struct sk_
@@ -2728,7 +2761,8 @@ static int rtm_to_fib6_config(struct sk_
 	if (rtm->rtm_type == RTN_UNREACHABLE ||
 	    rtm->rtm_type == RTN_BLACKHOLE ||
 	    rtm->rtm_type == RTN_PROHIBIT ||
@@ -183,7 +183,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 		cfg->fc_flags |= RTF_REJECT;
 
 	if (rtm->rtm_type == RTN_LOCAL)
@@ -3085,6 +3119,9 @@ static int rt6_fill_node(struct net *net
@@ -3087,6 +3121,9 @@ static int rt6_fill_node(struct net *net
 		case -EACCES:
 			rtm->rtm_type = RTN_PROHIBIT;
 			break;
@@ -193,7 +193,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 		case -EAGAIN:
 			rtm->rtm_type = RTN_THROW;
 			break;
@@ -3358,6 +3395,8 @@ static int ip6_route_dev_notify(struct n
@@ -3360,6 +3397,8 @@ static int ip6_route_dev_notify(struct n
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
@@ -202,7 +202,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
@@ -3574,6 +3613,17 @@ static int __net_init ip6_route_net_init
@@ -3576,6 +3615,17 @@ static int __net_init ip6_route_net_init
 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
 			 ip6_template_metrics, true);
@@ -220,7 +220,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 #endif
 
 	net->ipv6.sysctl.flush_delay = 0;
@@ -3592,6 +3642,8 @@ out:
@@ -3594,6 +3644,8 @@ out:
 	return ret;
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
@@ -229,7 +229,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 out_ip6_prohibit_entry:
 	kfree(net->ipv6.ip6_prohibit_entry);
 out_ip6_null_entry:
@@ -3609,6 +3661,7 @@ static void __net_exit ip6_route_net_exi
@@ -3611,6 +3663,7 @@ static void __net_exit ip6_route_net_exi
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	kfree(net->ipv6.ip6_prohibit_entry);
 	kfree(net->ipv6.ip6_blk_hole_entry);
@@ -237,7 +237,7 @@ Signed-off-by: Jonas Gorski <jogo@openwrt.org>
 #endif
 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
@@ -3706,6 +3759,9 @@ int __init ip6_route_init(void)
@@ -3708,6 +3761,9 @@ int __init ip6_route_init(void)
 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
 
@@ -110,11 +110,11 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+	for (i = 0; i < host->n_ports; i++) {
+		if (unlikely(!host->ports[i]->ledtrig))
+			continue;
+
 
+		snprintf(host->ports[i]->ledtrig_name,
+			sizeof(host->ports[i]->ledtrig_name), "ata%u",
+			host->ports[i]->print_id);
 
+
+		host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name;
+
+		if (led_trigger_register(host->ports[i]->ledtrig)) {
 
@@ -503,9 +503,6 @@ Signed-off-by: John Crispin <blogic@openwrt.org>
+	mediatek,reset-pin = <&pio 15 0>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
new file mode 100644
index 0000000..80c1ab8
--- /dev/null
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -0,0 +1,593 @@
 
@@ -9,11 +9,9 @@ Signed-off-by: John Crispin <blogic@openwrt.org>
 | 
			
		||||
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |    5 +-
 | 
			
		||||
 2 files changed, 44 insertions(+), 50 deletions(-)
 | 
			
		||||
 | 
			
		||||
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 | 
			
		||||
index 5d33053..2e05920 100644
 | 
			
		||||
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 | 
			
		||||
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 | 
			
		||||
@@ -326,7 +326,7 @@ static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
 | 
			
		||||
@@ -326,7 +326,7 @@ static inline void mtk_irq_disable(struc
 | 
			
		||||
 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
 | 
			
		||||
 	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
 | 
			
		||||
 	/* flush write */
 | 
			
		||||
@@ -22,7 +20,7 @@ index 5d33053..2e05920 100644
 | 
			
		||||
 	spin_unlock_irqrestore(ð->irq_lock, flags);
 | 
			
		||||
 }
 | 
			
		||||
 
 | 
			
		||||
@@ -339,7 +339,7 @@ static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
 | 
			
		||||
@@ -339,7 +339,7 @@ static inline void mtk_irq_enable(struct
 | 
			
		||||
 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
 | 
			
		||||
 	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
 | 
			
		||||
 	/* flush write */
 | 
			
		||||
@@ -31,7 +29,7 @@ index 5d33053..2e05920 100644
 | 
			
		||||
 	spin_unlock_irqrestore(ð->irq_lock, flags);
 | 
			
		||||
 }
 | 
			
		||||
 
 | 
			
		||||
@@ -710,10 +710,26 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
 | 
			
		||||
@@ -710,10 +710,26 @@ static inline int mtk_cal_txd_req(struct
 | 
			
		||||
 	return nfrags;
 | 
			
		||||
 }
 | 
			
		||||
 
 | 
			
		||||
@@ -58,7 +56,7 @@ index 5d33053..2e05920 100644
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
 		if (!eth->netdev[i])
 			continue;
@@ -725,6 +741,7 @@ static void mtk_stop_queue(struct mtk_eth *eth)
@@ -725,6 +741,7 @@ static void mtk_stop_queue(struct mtk_et
 {
 	int i;
 
@@ -66,7 +64,7 @@ index 5d33053..2e05920 100644
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
 		if (!eth->netdev[i])
 			continue;
@@ -775,12 +792,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -775,12 +792,9 @@ static int mtk_start_xmit(struct sk_buff
 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
 		goto drop;
 
@@ -81,7 +79,7 @@ index 5d33053..2e05920 100644
 	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
@@ -927,7 +941,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
@@ -927,7 +941,6 @@ static int mtk_poll_tx(struct mtk_eth *e
 		}
 		mtk_tx_unmap(eth->dev, tx_buf);
 
@@ -89,7 +87,7 @@ index 5d33053..2e05920 100644
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
 
@@ -945,11 +958,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
@@ -945,11 +958,8 @@ static int mtk_poll_tx(struct mtk_eth *e
 		netdev_completed_queue(eth->netdev[i], done, bytes);
 	}
 
@@ -103,7 +101,7 @@ index 5d33053..2e05920 100644
 		mtk_wake_queue(eth);
 
 	return done;
@@ -973,10 +983,11 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
@@ -973,10 +983,11 @@ static int mtk_napi_tx(struct napi_struc
 	int tx_done = 0;
 
 	mtk_handle_status_irq(eth);
@@ -117,7 +115,7 @@ index 5d33053..2e05920 100644
 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
 		dev_info(eth->dev,
 			 "done tx %d, intr 0x%08x/0x%x\n",
@@ -1002,9 +1013,12 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
@@ -1002,9 +1013,12 @@ static int mtk_napi_rx(struct napi_struc
 	u32 status, mask;
 	int rx_done = 0;
 
@@ -131,7 +129,7 @@ index 5d33053..2e05920 100644
 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
 		dev_info(eth->dev,
 			 "done rx %d, intr 0x%08x/0x%x\n",
@@ -1052,9 +1066,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
@@ -1052,9 +1066,8 @@ static int mtk_tx_alloc(struct mtk_eth *
 
 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
 	ring->next_free = &ring->dma[0];
@@ -143,7 +141,7 @@ index 5d33053..2e05920 100644
 
 	/* make sure that all changes to the dma ring are flushed before we
 	 * continue
@@ -1259,21 +1272,11 @@ static void mtk_tx_timeout(struct net_device *dev)
@@ -1259,21 +1272,11 @@ static void mtk_tx_timeout(struct net_de
 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
 {
 	struct mtk_eth *eth = _eth;
@@ -168,7 +166,7 @@ index 5d33053..2e05920 100644
 
 	return IRQ_HANDLED;
 }
@@ -1281,21 +1284,11 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
@@ -1281,21 +1284,11 @@ static irqreturn_t mtk_handle_irq_rx(int
 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
 {
 	struct mtk_eth *eth = _eth;
@@ -193,7 +191,7 @@ index 5d33053..2e05920 100644
 
 	return IRQ_HANDLED;
 }
@@ -1326,7 +1319,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
@@ -1326,7 +1319,7 @@ static int mtk_start_dma(struct mtk_eth
 	mtk_w32(eth,
 		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
 		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
@@ -202,7 +200,7 @@ index 5d33053..2e05920 100644
 		MTK_QDMA_GLO_CFG);
 
 	return 0;
@@ -1440,7 +1433,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
@@ -1440,7 +1433,7 @@ static int __init mtk_hw_init(struct mtk
 
 	/* disable delay and normal interrupt */
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
@@ -211,7 +209,7 @@ index 5d33053..2e05920 100644
 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
 	mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1765,7 +1758,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -1765,7 +1758,7 @@ static int mtk_add_mac(struct mtk_eth *e
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
@@ -220,8 +218,6 @@ index 5d33053..2e05920 100644
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 5093518..6b22445 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,9 +18,9 @@
@@ -244,6 +240,3 @@ index 5093518..6b22445 100644
 #define MTK_TX_WB_DDONE		BIT(6)
 #define MTK_DMA_SIZE_16DWORDS	(2 << 4)
 #define MTK_RX_DMA_BUSY		BIT(3)
-- 
1.7.10.4