Files
openwrt-R7800-nss/target/linux/ipq806x/patches-6.6/990-0317-net-bonding-Added-acceleration-support-over-LAG-inte.patch
2025-08-04 12:58:18 +02:00

738 lines
21 KiB
Diff

From d7d651481155a9225f5a5a0482631950e8fb6d69 Mon Sep 17 00:00:00 2001
From: Ratheesh Kannoth <rkannoth@codeaurora.org>
Date: Fri, 15 May 2020 12:36:11 +0530
Subject: [PATCH 311/500] net/bonding: Added acceleration support over LAG
interface
The accel support is enabled for the following LAG modes,
1. Balance-Xor (Static LAG)
2. 802.3ad (Dynamic LAG)
Change-Id: I4524902ddec583a2963cb38731c062f216a5386d
Signed-off-by: Shyam Sunder <ssunde@codeaurora.org>
net/bonding: Fix for Load Balancing within bonding (LAG) group
The new APIs (without skb) introduced in the bonding driver to calculate a hash
on given parameters (IP version, src IP, dst IP, src MAC, dst MAC) have an
issue in creating the flow_key. flow_get_u32_dst returns 0 for the given
flow_key because addr_type is not set correctly. A fix is added to populate
addr_type. Tested different flows and verified that the flows are distributed.
Change-Id: Ie54a40f590010ea2994ffc4ace8f4d050258a06f
Signed-off-by: Bhaskar Valaboju <bhaskarv@codeaurora.org>
Signed-off-by: Ratheesh Kannoth <rkannoth@codeaurora.org>
net: bonding: Relocate kernel bond functions
Header files included in the uapi folder are the interface between
userspace and kernel space. This patch declares the new bonding
functions in a kernel header file instead of a uapi header file.
Change-Id: I1b33f07015b79ec4ac702a9079da01f1c2a08bbb
Signed-off-by: Ratheesh Kannoth <rkannoth@codeaurora.org>
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
---
drivers/net/bonding/bond_3ad.c | 160 ++++++++++++++++-
drivers/net/bonding/bond_main.c | 306 ++++++++++++++++++++++++++++++--
include/net/bond_3ad.h | 7 +-
include/net/bonding.h | 21 +++
4 files changed, 481 insertions(+), 13 deletions(-)
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -116,6 +116,38 @@ static void ad_marker_response_received(
struct port *port);
static void ad_update_actor_keys(struct port *port, bool reset);
+struct bond_cb __rcu *bond_cb;
+
+int bond_register_cb(struct bond_cb *cb)
+{
+ struct bond_cb *lag_cb;
+
+ lag_cb = kzalloc(sizeof(*lag_cb), GFP_ATOMIC | __GFP_NOWARN);
+ if (!lag_cb) {
+ return -1;
+ }
+
+ memcpy((void *)lag_cb, (void *)cb, sizeof(*cb));
+
+ rcu_read_lock();
+ rcu_assign_pointer(bond_cb, lag_cb);
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(bond_register_cb);
+
+void bond_unregister_cb(void)
+{
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ rcu_assign_pointer(bond_cb, NULL);
+ rcu_read_unlock();
+
+ kfree(lag_cb_main);
+}
+EXPORT_SYMBOL(bond_unregister_cb);
/* ================= api to bonding and kernel code ================== */
@@ -430,7 +462,6 @@ static u16 __ad_timer_to_ticks(u16 timer
return retval;
}
-
/* ================= ad_rx_machine helper functions ================== */
/**
@@ -1073,6 +1104,28 @@ static void ad_mux_machine(struct port *
ad_disable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
+
+ /* Send a notification about change in state of this
+ * port. We only want to handle case where port moves
+ * from AD_MUX_COLLECTING_DISTRIBUTING ->
+ * AD_MUX_ATTACHED.
+ */
+ if (bond_slave_is_up(port->slave) &&
+ (last_state == AD_MUX_COLLECTING_DISTRIBUTING)) {
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main &&
+ lag_cb_main->bond_cb_link_down) {
+ struct net_device *dev;
+
+ dev = port->slave->dev;
+ lag_cb_main->bond_cb_link_down(dev);
+ }
+ rcu_read_unlock();
+ }
+
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
port->actor_oper_port_state |= LACP_STATE_COLLECTING;
@@ -1917,6 +1970,7 @@ static void ad_enable_collecting_distrib
bool *update_slave_arr)
{
if (port->aggregator->is_active) {
+ struct bond_cb *lag_cb_main;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Enabling port %d (LAG %d)\n",
port->actor_port_number,
@@ -1924,6 +1978,14 @@ static void ad_enable_collecting_distrib
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+
+ if (lag_cb_main && lag_cb_main->bond_cb_link_up)
+ lag_cb_main->bond_cb_link_up(port->slave->dev);
+
+ rcu_read_unlock();
}
}
@@ -2683,6 +2745,102 @@ int bond_3ad_get_active_agg_info(struct
return ret;
}
+/* bond_3ad_get_tx_dev - Calculate egress interface for a given packet,
+ * for a LAG that is configured in 802.3AD mode
+ * @skb: pointer to skb to be egressed
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address
+ * @dst: pointer to destination L3 address
+ * @protocol: L3 protocol id from L2 header
+ * @bond_dev: pointer to bond master device
+ *
+ * If @skb is NULL, bond_xmit_hash is used to calculate hash using L2/L3
+ * addresses.
+ *
+ * Returns: Either valid slave device, or NULL otherwise
+ */
+struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, u8 *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct aggregator *agg;
+ struct ad_info ad_info;
+ struct list_head *iter;
+ struct slave *slave;
+ struct slave *first_ok_slave = NULL;
+ u32 hash = 0;
+ int slaves_in_agg;
+ int slave_agg_no = 0;
+ int agg_id;
+
+ if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ slaves_in_agg = ad_info.ports;
+ agg_id = ad_info.aggregator_id;
+
+ if (slaves_in_agg == 0) {
+ pr_debug("%s: Error: active aggregator is empty\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ if (skb) {
+ hash = bond_xmit_hash(bond, skb);
+ slave_agg_no = hash % slaves_in_agg;
+ } else {
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) {
+ pr_debug("%s: Error: Unsupported hash policy for 802.3AD fast path\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ hash = bond_xmit_hash_without_skb(src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ slave_agg_no = hash % slaves_in_agg;
+ }
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
+ if (!agg || agg->aggregator_identifier != agg_id)
+ continue;
+
+ if (slave_agg_no >= 0) {
+ if (!first_ok_slave && bond_slave_can_tx(slave))
+ first_ok_slave = slave;
+ slave_agg_no--;
+ continue;
+ }
+
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+
+ if (slave_agg_no >= 0) {
+ pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
+ bond_dev->name, agg_id);
+ return NULL;
+ }
+
+ /* we couldn't find any suitable slave after the agg_no, so use the
+ * first suitable found, if found.
+ */
+ if (first_ok_slave)
+ return first_ok_slave->dev;
+
+ return NULL;
+}
+
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -210,6 +210,7 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(
#endif
unsigned int bond_net_id __read_mostly;
+static unsigned long bond_id_mask = 0xFFFFFFF0;
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
{
@@ -287,6 +288,19 @@ const char *bond_mode_name(int mode)
return names[mode];
}
+int bond_get_id(struct net_device *bond_dev)
+{
+ struct bonding *bond;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return -EINVAL;
+
+ bond = netdev_priv(bond_dev);
+ return bond->id;
+}
+EXPORT_SYMBOL(bond_get_id);
+
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -1231,6 +1245,21 @@ void bond_change_active_slave(struct bon
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
+ if (bond->params.mode == BOND_MODE_XOR) {
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main &&
+ lag_cb_main->bond_cb_link_up) {
+ struct net_device *dev;
+
+ dev = new_active->dev;
+ lag_cb_main->bond_cb_link_up(dev);
+ }
+ rcu_read_unlock();
+ }
+
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
@@ -1876,6 +1905,7 @@ int bond_enslave(struct net_device *bond
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
+ struct bond_cb *lag_cb_main;
int link_reporting;
int res = 0, i;
@@ -2336,6 +2366,13 @@ skip_mac_set:
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_enslave)
+ lag_cb_main->bond_cb_enslave(slave_dev);
+
+ rcu_read_unlock();
+
/* enslave is successful */
bond_queue_slave_event(new_slave);
return 0;
@@ -2401,6 +2438,13 @@ err_undo_flags:
}
}
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_enslave)
+ lag_cb_main->bond_cb_enslave(slave_dev);
+
+ rcu_read_unlock();
+
return res;
}
@@ -2422,6 +2466,7 @@ static int __bond_release_one(struct net
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr_storage ss;
+ struct bond_cb *lag_cb_main;
int old_flags = bond_dev->flags;
netdev_features_t old_features = bond_dev->features;
@@ -2444,6 +2489,13 @@ static int __bond_release_one(struct net
bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_release)
+ lag_cb_main->bond_cb_release(slave_dev);
+
+ rcu_read_unlock();
+
bond_sysfs_slave_del(slave);
/* recompute stats just before removing the slave */
@@ -2772,8 +2824,8 @@ static void bond_miimon_commit(struct bo
struct slave *slave, *primary, *active;
bool do_failover = false;
struct list_head *iter;
-
- ASSERT_RTNL();
+ struct net_device *slave_dev = NULL;
+ struct bond_cb *lag_cb_main;
bond_for_each_slave(bond, slave, iter) {
switch (slave->link_new_state) {
@@ -2811,6 +2863,10 @@ static void bond_miimon_commit(struct bo
bond_set_active_slave(slave);
}
+ if ((bond->params.mode == BOND_MODE_XOR) &&
+ (!slave_dev))
+ slave_dev = slave->dev;
+
slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
@@ -2859,6 +2915,14 @@ static void bond_miimon_commit(struct bo
unblock_netpoll_tx();
}
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+
+ if (slave_dev && lag_cb_main && lag_cb_main->bond_cb_link_up)
+ lag_cb_main->bond_cb_link_up(slave_dev);
+
+ rcu_read_unlock();
+
bond_set_carrier(bond);
}
@@ -4111,11 +4175,221 @@ static inline u32 bond_eth_hash(struct s
return 0;
ep = (struct ethhdr *)(data + mhoff);
- return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
+ return ep->h_dest[5] ^ ep->h_source[5];
+}
+
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect_without_skb(struct bonding *bond,
+ u8 *src_mac, u8 *dst_mac,
+ void *psrc, void *pdst,
+ u16 protocol, __be16 *layer4hdr,
+ struct flow_keys *fk)
+{
+ u32 *src = NULL;
+ u32 *dst = NULL;
+
+ fk->ports.ports = 0;
+ src = (uint32_t *)psrc;
+ dst = (uint32_t *)pdst;
+
+ if (protocol == htons(ETH_P_IP)) {
+ /* V4 addresses and address type*/
+ fk->addrs.v4addrs.src = src[0];
+ fk->addrs.v4addrs.dst = dst[0];
+ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ /* V6 addresses and address type*/
+ memcpy(&fk->addrs.v6addrs.src, src, sizeof(struct in6_addr));
+ memcpy(&fk->addrs.v6addrs.dst, dst, sizeof(struct in6_addr));
+ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ } else {
+ return false;
+ }
+ if ((bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) &&
+ (layer4hdr))
+ fk->ports.ports = *layer4hdr;
+
+ return true;
+}
+
+/* bond_xmit_hash_without_skb - Applies load balancing algorithm for a packet,
+ * to calculate hash for a given set of L2/L3 addresses. Does not
+ * calculate egress interface.
+ */
+uint32_t bond_xmit_hash_without_skb(u8 *src_mac, u8 *dst_mac,
+ void *psrc, void *pdst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct flow_keys flow;
+ u32 hash = 0;
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+ !bond_flow_dissect_without_skb(bond, src_mac, dst_mac, psrc,
+ pdst, protocol, layer4hdr, &flow))
+ return (dst_mac[5] ^ src_mac[5]);
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23)
+ hash = dst_mac[5] ^ src_mac[5];
+ else if (layer4hdr)
+ hash = (__force u32)flow.ports.ports;
+
+ hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+ (__force u32)flow_get_u32_src(&flow);
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
+
+ return hash;
+}
+
+/* bond_xor_get_tx_dev - Calculate egress interface for a given packet for a LAG
+ * that is configured in balance-xor mode
+ * @skb: pointer to skb to be egressed
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address in network order
+ * @dst: pointer to destination L3 address in network order
+ * @protocol: L3 protocol
+ * @bond_dev: pointer to bond master device
+ *
+ * If @skb is NULL, bond_xmit_hash_without_skb is used to calculate hash using
+ * L2/L3 addresses.
+ *
+ * Returns: Either valid slave device, or NULL otherwise
+ */
+static struct net_device *bond_xor_get_tx_dev(struct sk_buff *skb,
+ u8 *src_mac, u8 *dst_mac,
+ void *src, void *dst,
+ u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
+ int slave_id = 0, i = 0;
+ u32 hash;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slave_cnt == 0) {
+ pr_debug("%s: Error: No slave is attached to the interface\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ if (skb) {
+ hash = bond_xmit_hash(bond, skb);
+ slave_id = hash % slave_cnt;
+ } else {
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) {
+ pr_debug("%s: Error: Unsupported hash policy for balance-XOR fast path\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ hash = bond_xmit_hash_without_skb(src_mac, dst_mac, src,
+ dst, protocol, bond_dev,
+ layer4hdr);
+ slave_id = hash % slave_cnt;
+ }
+
+ i = slave_id;
+
+ /* Here we start from the slave with slave_id */
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (--i < 0) {
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+ }
+
+ /* Here we start from the first slave up to slave_id */
+ i = slave_id;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (--i < 0)
+ break;
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+
+ return NULL;
+}
+
+/* bond_get_tx_dev - Calculate egress interface for a given packet.
+ *
+ * Supports 802.3AD and balance-xor modes
+ *
+ * @skb: pointer to skb to be egressed, if valid
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address in network order
+ * @dst: pointer to destination L3 address in network order
+ * @protocol: L3 protocol id from L2 header
+ * @bond_dev: pointer to bond master device
+ *
+ * Returns: Either valid slave device, or NULL for un-supported LAG modes
+ */
+struct net_device *bond_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond;
+
+ if (!bond_dev)
+ return NULL;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return NULL;
+
+ bond = netdev_priv(bond_dev);
+
+ switch (bond->params.mode) {
+ case BOND_MODE_XOR:
+ return bond_xor_get_tx_dev(skb, src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ case BOND_MODE_8023AD:
+ return bond_3ad_get_tx_dev(skb, src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(bond_get_tx_dev);
+
+/* In bond_xmit_xor() , we determine the output device by using a pre-
+ * determined xmit_hash_policy(), If the selected device is not enabled,
+ * find the next active slave.
+ */
+static int bond_xmit_xor(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct net_device *outdev;
+
+ outdev = bond_xor_get_tx_dev(skb, NULL, NULL, NULL,
+ NULL, 0, dev, NULL);
+ if (!outdev)
+ goto out;
+
+ bond_dev_queue_xmit(bond, skb, outdev);
+ goto final;
+out:
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+final:
+ return NETDEV_TX_OK;
}
static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
+
{
const struct ipv6hdr *iph6;
const struct iphdr *iph;
@@ -5241,15 +5515,16 @@ static netdev_tx_t bond_3ad_xor_xmit(str
struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
- struct bond_up_slave *slaves;
- struct slave *slave;
+ struct net_device *outdev = NULL;
- slaves = rcu_dereference(bond->usable_slaves);
- slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
- if (likely(slave))
- return bond_dev_queue_xmit(bond, skb, slave->dev);
+ outdev = bond_3ad_get_tx_dev(skb, NULL, NULL, NULL,
+ NULL, 0, dev, NULL);
+ if (!outdev) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
- return bond_tx_drop(dev, skb);
+ return bond_dev_queue_xmit(bond, skb, outdev);
}
/* in broadcast mode, we send everything to all usable interfaces. */
@@ -5499,8 +5774,9 @@ static netdev_tx_t __bond_start_xmit(str
return bond_xmit_roundrobin(skb, dev);
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
- case BOND_MODE_8023AD:
case BOND_MODE_XOR:
+ return bond_xmit_xor(skb, dev);
+ case BOND_MODE_8023AD:
return bond_3ad_xor_xmit(skb, dev);
case BOND_MODE_BROADCAST:
return bond_xmit_broadcast(skb, dev);
@@ -5937,6 +6213,9 @@ static void bond_destructor(struct net_d
if (bond->wq)
destroy_workqueue(bond->wq);
+ if (bond->id != (~0U))
+ clear_bit(bond->id, &bond_id_mask);
+
free_percpu(bond->rr_tx_counter);
}
@@ -6490,6 +6769,11 @@ int bond_create(struct net *net, const c
bond_work_init_all(bond);
+ bond->id = ~0U;
+ if (bond_id_mask != (~0UL)) {
+ bond->id = (u32)ffz(bond_id_mask);
+ set_bit(bond->id, &bond_id_mask);
+ }
out:
rtnl_unlock();
return res;
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -302,8 +302,13 @@ int bond_3ad_lacpdu_recv(const struct sk
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
+struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ uint8_t *dst_mac, void *src,
+ void *dst, uint16_t protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
size_t bond_3ad_stats_size(void);
#endif /* _NET_BOND_3AD_H */
-
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -90,6 +90,8 @@
#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
+extern struct bond_cb __rcu *bond_cb;
+
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -261,6 +263,7 @@ struct bonding {
struct mutex ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
struct bpf_prog *xdp_prog;
+ u32 id;
};
#define bond_slave_get_rcu(dev) \
@@ -683,6 +686,11 @@ struct bond_vlan_tag *bond_verify_device
int level);
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
+uint32_t bond_xmit_hash_without_skb(uint8_t *src_mac, uint8_t *dst_mac,
+ void *psrc, void *pdst, uint16_t protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+
void bond_work_init_all(struct bonding *bond);
#ifdef CONFIG_PROC_FS
@@ -787,4 +795,17 @@ static inline netdev_tx_t bond_tx_drop(s
return NET_XMIT_DROP;
}
+struct bond_cb {
+ void (*bond_cb_link_up)(struct net_device *slave);
+ void (*bond_cb_link_down)(struct net_device *slave);
+ void (*bond_cb_enslave)(struct net_device *slave);
+ void (*bond_cb_release)(struct net_device *slave);
+ void (*bond_cb_delete_by_slave)(struct net_device *slave);
+ void (*bond_cb_delete_by_mac)(uint8_t *mac_addr);
+};
+
+extern int bond_register_cb(struct bond_cb *cb);
+extern void bond_unregister_cb(void);
+extern int bond_get_id(struct net_device *bond_dev);
+
#endif /* _NET_BONDING_H */