ipq806x: NSS Hardware Offloading additional patches
This commit is contained in:
		
							
								
								
									
 target/linux/ipq806x/patches-5.15/999-300-qca-mcs-support.patch | 932 ++++++++++++++++++++
 1 file changed, 932 insertions(+)
 create mode 100644 target/linux/ipq806x/patches-5.15/999-300-qca-mcs-support.patch
							| @@ -0,0 +1,932 @@ | ||||
| --- a/include/linux/if_bridge.h | ||||
| +++ b/include/linux/if_bridge.h | ||||
| @@ -235,4 +235,17 @@ extern br_get_dst_hook_t __rcu *br_get_d | ||||
|  extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br); | ||||
|  /* QCA NSS bridge-mgr support - End */ | ||||
|   | ||||
| +/* QCA qca-mcs support - Start */ | ||||
| +typedef struct net_bridge_port *br_get_dst_hook_t(const struct net_bridge_port *src, | ||||
| +		struct sk_buff **skb); | ||||
| +extern br_get_dst_hook_t __rcu *br_get_dst_hook; | ||||
| + | ||||
| +typedef int (br_multicast_handle_hook_t)(const struct net_bridge_port *src, | ||||
| +		struct sk_buff *skb); | ||||
| +extern br_multicast_handle_hook_t __rcu *br_multicast_handle_hook; | ||||
| + | ||||
| +typedef void (br_notify_hook_t)(int group, int event, const void *ptr); | ||||
| +extern br_notify_hook_t __rcu *br_notify_hook; | ||||
| +/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  #endif | ||||
| --- a/net/bridge/br_fdb.c | ||||
| +++ b/net/bridge/br_fdb.c | ||||
| @@ -186,6 +186,7 @@ struct net_bridge_fdb_entry *br_fdb_find | ||||
|  { | ||||
|  	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid); | ||||
|  } | ||||
| +EXPORT_SYMBOL_GPL(br_fdb_find_rcu); /* QCA qca-mcs support */ | ||||
|   | ||||
|  /* When a static FDB entry is added, the mac address from the entry is | ||||
|   * added to the bridge private HW address list and all required ports | ||||
| @@ -892,6 +893,7 @@ static void fdb_notify(struct net_bridge | ||||
|  		kfree_skb(skb); | ||||
|  		goto errout; | ||||
|  	} | ||||
| +	__br_notify(RTNLGRP_NEIGH, type, fdb); /* QCA qca-mcs support */ | ||||
|  	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); | ||||
|  	return; | ||||
|  errout: | ||||
| --- a/net/bridge/br_private.h | ||||
| +++ b/net/bridge/br_private.h | ||||
| @@ -853,6 +853,7 @@ void br_manage_promisc(struct net_bridge | ||||
|  int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev); | ||||
|   | ||||
|  /* br_input.c */ | ||||
| +int br_pass_frame_up(struct sk_buff *skb); /* QCA qca-mcs support */ | ||||
|  int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); | ||||
|  rx_handler_func_t *br_get_rx_handler(const struct net_device *dev); | ||||
|   | ||||
| @@ -2125,4 +2126,14 @@ struct nd_msg *br_is_nd_neigh_msg(struct | ||||
|  #define __br_get(__hook, __default, __args ...) \ | ||||
|  		(__hook ? (__hook(__args)) : (__default)) | ||||
|  /* QCA NSS ECM support - End */ | ||||
| + | ||||
| +/* QCA qca-mcs support - Start */ | ||||
| +static inline void __br_notify(int group, int type, const void *data) | ||||
| +{ | ||||
| +	br_notify_hook_t *notify_hook = rcu_dereference(br_notify_hook); | ||||
| + | ||||
| +	if (notify_hook) | ||||
| +		notify_hook(group, type, data); | ||||
| +} | ||||
| +/* QCA qca-mcs support - End */ | ||||
|  #endif | ||||
| --- a/net/bridge/br_netlink.c | ||||
| +++ b/net/bridge/br_netlink.c | ||||
| @@ -609,6 +609,7 @@ void br_info_notify(int event, const str | ||||
|  		kfree_skb(skb); | ||||
|  		goto errout; | ||||
|  	} | ||||
| +	__br_notify(RTNLGRP_LINK, event, port); /* QCA qca-mcs support */ | ||||
|  	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); | ||||
|  	return; | ||||
|  errout: | ||||
| --- a/net/bridge/br.c | ||||
| +++ b/net/bridge/br.c | ||||
| @@ -463,6 +463,12 @@ static void __exit br_deinit(void) | ||||
|  	br_fdb_fini(); | ||||
|  } | ||||
|   | ||||
| +/* QCA qca-mcs support - Start */ | ||||
| +/* Hook for bridge event notifications */ | ||||
| +br_notify_hook_t __rcu *br_notify_hook __read_mostly; | ||||
| +EXPORT_SYMBOL_GPL(br_notify_hook); | ||||
| +/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  module_init(br_init) | ||||
|  module_exit(br_deinit) | ||||
|  MODULE_LICENSE("GPL"); | ||||
| --- a/net/bridge/br_device.c | ||||
| +++ b/net/bridge/br_device.c | ||||
| @@ -82,6 +82,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff * | ||||
|  	if (is_broadcast_ether_addr(dest)) { | ||||
|  		br_flood(br, skb, BR_PKT_BROADCAST, false, true); | ||||
|  	} else if (is_multicast_ether_addr(dest)) { | ||||
| +		/* QCA qca-mcs support - Start */ | ||||
| +		br_multicast_handle_hook_t *multicast_handle_hook = rcu_dereference(br_multicast_handle_hook); | ||||
| +		if (!__br_get(multicast_handle_hook, true, NULL, skb)) | ||||
| +			goto out; | ||||
| +		/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  		if (unlikely(netpoll_tx_running(dev))) { | ||||
|  			br_flood(br, skb, BR_PKT_MULTICAST, false, true); | ||||
|  			goto out; | ||||
| --- a/net/bridge/br_input.c | ||||
| +++ b/net/bridge/br_input.c | ||||
| @@ -24,6 +24,16 @@ | ||||
|  #include "br_private_tunnel.h" | ||||
|  #include "br_private_offload.h" | ||||
|   | ||||
| +/* QCA qca-mcs support - Start */ | ||||
| +/* Hook for external Multicast handler */ | ||||
| +br_multicast_handle_hook_t __rcu *br_multicast_handle_hook __read_mostly; | ||||
| +EXPORT_SYMBOL_GPL(br_multicast_handle_hook); | ||||
| + | ||||
| +/* Hook for external forwarding logic */ | ||||
| +br_get_dst_hook_t __rcu *br_get_dst_hook __read_mostly; | ||||
| +EXPORT_SYMBOL_GPL(br_get_dst_hook); | ||||
| +/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  static int | ||||
|  br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) | ||||
|  { | ||||
| @@ -31,7 +41,7 @@ br_netif_receive_skb(struct net *net, st | ||||
|  	return netif_receive_skb(skb); | ||||
|  } | ||||
|   | ||||
| -static int br_pass_frame_up(struct sk_buff *skb) | ||||
| +int br_pass_frame_up(struct sk_buff *skb) | ||||
|  { | ||||
|  	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; | ||||
|  	struct net_bridge *br = netdev_priv(brdev); | ||||
| @@ -70,6 +80,7 @@ static int br_pass_frame_up(struct sk_bu | ||||
|  		       dev_net(indev), NULL, skb, indev, NULL, | ||||
|  		       br_netif_receive_skb); | ||||
|  } | ||||
| +EXPORT_SYMBOL_GPL(br_pass_frame_up); /* QCA qca-mcs support */ | ||||
|   | ||||
|  /* note: already called with rcu_read_lock */ | ||||
|  int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | ||||
| @@ -85,6 +96,11 @@ int br_handle_frame_finish(struct net *n | ||||
|  	struct net_bridge *br; | ||||
|  	u16 vid = 0; | ||||
|  	u8 state; | ||||
| +	/* QCA qca-mcs support - Start */ | ||||
| +	br_multicast_handle_hook_t *multicast_handle_hook; | ||||
| +	struct net_bridge_port *pdst = NULL; | ||||
| +	br_get_dst_hook_t *get_dst_hook = rcu_dereference(br_get_dst_hook); | ||||
| +	/* QCA qca-mcs support - End */ | ||||
|   | ||||
|  	if (!p || p->state == BR_STATE_DISABLED) | ||||
|  		goto drop; | ||||
| @@ -141,6 +157,11 @@ int br_handle_frame_finish(struct net *n | ||||
|   | ||||
|  	switch (pkt_type) { | ||||
|  	case BR_PKT_MULTICAST: | ||||
| +		/* QCA qca-mcs support - Start */ | ||||
| +		multicast_handle_hook = rcu_dereference(br_multicast_handle_hook); | ||||
| +		if (!__br_get(multicast_handle_hook, true, p, skb)) | ||||
| +			goto out; | ||||
| +		/* QCA qca-mcs support - End */ | ||||
|  		mdst = br_mdb_get(brmctx, skb, vid); | ||||
|  		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && | ||||
|  		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) { | ||||
| @@ -156,8 +177,15 @@ int br_handle_frame_finish(struct net *n | ||||
|  		} | ||||
|  		break; | ||||
|  	case BR_PKT_UNICAST: | ||||
| -		dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid); | ||||
| -		break; | ||||
| +		/* QCA qca-mcs support - Start */ | ||||
| +		pdst = __br_get(get_dst_hook, NULL, p, &skb); | ||||
| +		if (pdst) { | ||||
| +			if (!skb) | ||||
| +				goto out; | ||||
| +		} else { | ||||
| +		/* QCA qca-mcs support - End */ | ||||
| +			dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid); | ||||
| +		} | ||||
|  	default: | ||||
|  		break; | ||||
|  	} | ||||
| @@ -172,13 +200,20 @@ int br_handle_frame_finish(struct net *n | ||||
|  			dst->used = now; | ||||
|  		br_forward(dst->dst, skb, local_rcv, false); | ||||
|  	} else { | ||||
| +		/* QCA qca-mcs support - Start */ | ||||
| +		if (pdst) { | ||||
| +			br_forward(pdst, skb, local_rcv, false); | ||||
| +			goto out1; | ||||
| +		} | ||||
| +		/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  		br_offload_skb_disable(skb); | ||||
|  		if (!mcast_hit) | ||||
|  			br_flood(br, skb, pkt_type, local_rcv, false); | ||||
|  		else | ||||
|  			br_multicast_flood(mdst, skb, brmctx, local_rcv, false); | ||||
|  	} | ||||
| - | ||||
| +out1: /* QCA qca-mcs support */ | ||||
|  	if (local_rcv) | ||||
|  		return br_pass_frame_up(skb); | ||||
|   | ||||
| --- a/include/linux/mroute.h | ||||
| +++ b/include/linux/mroute.h | ||||
| @@ -85,4 +85,44 @@ struct rtmsg; | ||||
|  int ipmr_get_route(struct net *net, struct sk_buff *skb, | ||||
|  		   __be32 saddr, __be32 daddr, | ||||
|  		   struct rtmsg *rtm, u32 portid); | ||||
| + | ||||
| +/* QCA ECM qca-mcs support - Start */ | ||||
| +#define IPMR_MFC_EVENT_UPDATE   1 | ||||
| +#define IPMR_MFC_EVENT_DELETE   2 | ||||
| + | ||||
| +/* | ||||
| + * Callback to registered modules in the event of updates to a multicast group | ||||
| + */ | ||||
| +typedef void (*ipmr_mfc_event_offload_callback_t)(__be32 origin, __be32 group, | ||||
| +						  u32 max_dest_dev, | ||||
| +						  u32 dest_dev_idx[], | ||||
| +						  u8 op); | ||||
| + | ||||
| +/* | ||||
| + * Register the callback used to inform offload modules when updates occur to | ||||
| + * MFC. The callback is registered by offload modules | ||||
| + */ | ||||
| +extern bool ipmr_register_mfc_event_offload_callback( | ||||
| +			ipmr_mfc_event_offload_callback_t mfc_offload_cb); | ||||
| + | ||||
| +/* | ||||
| + * De-Register the callback used to inform offload modules when updates occur | ||||
| + * to MFC | ||||
| + */ | ||||
| +extern void ipmr_unregister_mfc_event_offload_callback(void); | ||||
| + | ||||
| +/* | ||||
| + * Find the destination interface list, given a multicast group and source | ||||
| + */ | ||||
| +extern int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group, | ||||
| +				 u32 max_dst_cnt, u32 dest_dev[]); | ||||
| + | ||||
| +/* | ||||
| + * Out-of-band multicast statistics update for flows that are offloaded from | ||||
| + * Linux | ||||
| + */ | ||||
| +extern int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group, | ||||
| +				 u64 pkts_in, u64 bytes_in, | ||||
| +				 u64 pkts_out, u64 bytes_out); | ||||
| +/* QCA ECM qca-mcs support - End */ | ||||
|  #endif | ||||
| --- a/include/linux/mroute6.h | ||||
| +++ b/include/linux/mroute6.h | ||||
| @@ -110,4 +110,47 @@ static inline int ip6mr_sk_done(struct s | ||||
|  	return 0; | ||||
|  } | ||||
|  #endif | ||||
| + | ||||
| +/* QCA qca-mcs support - Start */ | ||||
| +#define IP6MR_MFC_EVENT_UPDATE   1 | ||||
| +#define IP6MR_MFC_EVENT_DELETE   2 | ||||
| + | ||||
| +/* | ||||
| + * Callback to registered modules in the event of updates to a multicast group | ||||
| + */ | ||||
| +typedef void (*ip6mr_mfc_event_offload_callback_t)(struct in6_addr *origin, | ||||
| +						   struct in6_addr *group, | ||||
| +						   u32 max_dest_dev, | ||||
| +						   u32 dest_dev_idx[], | ||||
| +						   uint8_t op); | ||||
| + | ||||
| +/* | ||||
| + * Register the callback used to inform offload modules when updates occur | ||||
| + * to MFC. The callback is registered by offload modules | ||||
| + */ | ||||
| +extern bool ip6mr_register_mfc_event_offload_callback( | ||||
| +			ip6mr_mfc_event_offload_callback_t mfc_offload_cb); | ||||
| + | ||||
| +/* | ||||
| + * De-Register the callback used to inform offload modules when updates occur | ||||
| + * to MFC | ||||
| + */ | ||||
| +extern void ip6mr_unregister_mfc_event_offload_callback(void); | ||||
| + | ||||
| +/* | ||||
| + * Find the destination interface list given a multicast group and source | ||||
| + */ | ||||
| +extern int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin, | ||||
| +				struct in6_addr *group, u32 max_dst_cnt, | ||||
| +				u32 dest_dev[]); | ||||
| + | ||||
| +/* | ||||
| + * Out-of-band multicast statistics update for flows that are offloaded from | ||||
| + * Linux | ||||
| + */ | ||||
| +extern int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin, | ||||
| +				  struct in6_addr *group, uint64_t pkts_in, | ||||
| +				  uint64_t bytes_in, uint64_t pkts_out, | ||||
| +				  uint64_t bytes_out); | ||||
| +/* QCA qca-mcs support - End */ | ||||
|  #endif | ||||
| --- a/net/ipv4/ipmr.c | ||||
| +++ b/net/ipv4/ipmr.c | ||||
| @@ -108,6 +108,15 @@ static void igmpmsg_netlink_event(struct | ||||
|  static void mroute_clean_tables(struct mr_table *mrt, int flags); | ||||
|  static void ipmr_expire_process(struct timer_list *t); | ||||
|   | ||||
| +/* QCA ECM qca-mcs support - Start */ | ||||
| +/* spinlock for offload */ | ||||
| +static DEFINE_SPINLOCK(lock); | ||||
| + | ||||
| +static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin, | ||||
| +					 __be32 mcastgrp); | ||||
| +static ipmr_mfc_event_offload_callback_t __rcu ipmr_mfc_event_offload_callback; | ||||
| +/* QCA ECM qca-mcs support - End */ | ||||
| + | ||||
|  #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | ||||
|  #define ipmr_for_each_table(mrt, net)					\ | ||||
|  	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list,	\ | ||||
| @@ -222,6 +231,228 @@ static int ipmr_rule_fill(struct fib_rul | ||||
|  	return 0; | ||||
|  } | ||||
|   | ||||
| +/* QCA ECM qca-mcs support - Start */ | ||||
| +/* ipmr_sync_entry_update() | ||||
| + * Call the registered offload callback to report an update to a multicast | ||||
| + * route entry. The callback receives the list of destination interfaces and | ||||
| + * the interface count | ||||
| + */ | ||||
| +static void ipmr_sync_entry_update(struct mr_table *mrt, | ||||
| +				   struct mfc_cache *cache) | ||||
| +{ | ||||
| +	int vifi, dest_if_count = 0; | ||||
| +	u32 dest_dev[MAXVIFS]; | ||||
| +	__be32  origin; | ||||
| +	__be32  group; | ||||
| +	ipmr_mfc_event_offload_callback_t offload_update_cb_f; | ||||
| + | ||||
| +	memset(dest_dev, 0, sizeof(dest_dev)); | ||||
| + | ||||
| +	origin = cache->mfc_origin; | ||||
| +	group = cache->mfc_mcastgrp; | ||||
| + | ||||
| +	read_lock(&mrt_lock); | ||||
| +	for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) { | ||||
| +		if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) && | ||||
| +		      (cache->_c.mfc_un.res.ttls[vifi] < 255))) { | ||||
| +			continue; | ||||
| +		} | ||||
| +		if (dest_if_count == MAXVIFS) { | ||||
| +			read_unlock(&mrt_lock); | ||||
| +			return; | ||||
| +		} | ||||
| + | ||||
| +		if (!VIF_EXISTS(mrt, vifi)) { | ||||
| +			read_unlock(&mrt_lock); | ||||
| +			return; | ||||
| +		} | ||||
| +		dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex; | ||||
| +		dest_if_count++; | ||||
| +	} | ||||
| +	read_unlock(&mrt_lock); | ||||
| + | ||||
| +	rcu_read_lock(); | ||||
| +	offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback); | ||||
| + | ||||
| +	if (!offload_update_cb_f) { | ||||
| +		rcu_read_unlock(); | ||||
| +		return; | ||||
| +	} | ||||
| + | ||||
| +	offload_update_cb_f(group, origin, dest_if_count, dest_dev, | ||||
| +			    IPMR_MFC_EVENT_UPDATE); | ||||
| +	rcu_read_unlock(); | ||||
| +} | ||||
| + | ||||
| +/* ipmr_sync_entry_delete() | ||||
| + * Call the registered offload callback to inform of a multicast route entry | ||||
| + * delete event | ||||
| + */ | ||||
| +static void ipmr_sync_entry_delete(u32 origin, u32 group) | ||||
| +{ | ||||
| +	ipmr_mfc_event_offload_callback_t offload_update_cb_f; | ||||
| + | ||||
| +	rcu_read_lock(); | ||||
| +	offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback); | ||||
| + | ||||
| +	if (!offload_update_cb_f) { | ||||
| +		rcu_read_unlock(); | ||||
| +		return; | ||||
| +	} | ||||
| + | ||||
| +	offload_update_cb_f(group, origin, 0, NULL, IPMR_MFC_EVENT_DELETE); | ||||
| +	rcu_read_unlock(); | ||||
| +} | ||||
| + | ||||
| +/* ipmr_register_mfc_event_offload_callback() | ||||
| + * Register the IPv4 Multicast update offload callback with IPMR | ||||
| + */ | ||||
| +bool ipmr_register_mfc_event_offload_callback( | ||||
| +		ipmr_mfc_event_offload_callback_t mfc_offload_cb) | ||||
| +{ | ||||
| +	ipmr_mfc_event_offload_callback_t offload_update_cb_f; | ||||
| + | ||||
| +	rcu_read_lock(); | ||||
| +	offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback); | ||||
| + | ||||
| +	if (offload_update_cb_f) { | ||||
| +		rcu_read_unlock(); | ||||
| +		return false; | ||||
| +	} | ||||
| +	rcu_read_unlock(); | ||||
| + | ||||
| +	spin_lock(&lock); | ||||
| +	rcu_assign_pointer(ipmr_mfc_event_offload_callback, mfc_offload_cb); | ||||
| +	spin_unlock(&lock); | ||||
| +	synchronize_rcu(); | ||||
| +	return true; | ||||
| +} | ||||
| +EXPORT_SYMBOL(ipmr_register_mfc_event_offload_callback); | ||||
| + | ||||
| +/* ipmr_unregister_mfc_event_offload_callback() | ||||
| + * De-register the IPv4 Multicast update offload callback with IPMR | ||||
| + */ | ||||
| +void ipmr_unregister_mfc_event_offload_callback(void) | ||||
| +{ | ||||
| +	spin_lock(&lock); | ||||
| +	rcu_assign_pointer(ipmr_mfc_event_offload_callback, NULL); | ||||
| +	spin_unlock(&lock); | ||||
| +	synchronize_rcu(); | ||||
| +} | ||||
| +EXPORT_SYMBOL(ipmr_unregister_mfc_event_offload_callback); | ||||
| + | ||||
| +/* ipmr_find_mfc_entry() | ||||
| + * Returns destination interface list for a particular multicast flow, and | ||||
| + * the number of interfaces in the list | ||||
| + */ | ||||
| +int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group, | ||||
| +			u32 max_dest_cnt, u32 dest_dev[]) | ||||
| +{ | ||||
| +	int vifi, dest_if_count = 0; | ||||
| +	struct mr_table *mrt; | ||||
| +	struct mfc_cache *cache; | ||||
| + | ||||
| +	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | ||||
| +	if (!mrt) | ||||
| +		return -ENOENT; | ||||
| + | ||||
| +	rcu_read_lock(); | ||||
| +	cache = ipmr_cache_find(mrt, origin, group); | ||||
| +	if (!cache) { | ||||
| +		rcu_read_unlock(); | ||||
| +		return -ENOENT; | ||||
| +	} | ||||
| + | ||||
| +	read_lock(&mrt_lock); | ||||
| +	for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) { | ||||
| +		if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) && | ||||
| +		      (cache->_c.mfc_un.res.ttls[vifi] < 255))) { | ||||
| +			continue; | ||||
| +		} | ||||
| + | ||||
| +		/* We have another valid destination interface entry. Check if | ||||
| +		 * the number of the destination interfaces for the route is | ||||
| +		 * exceeding the size of the array given to us | ||||
| +		 */ | ||||
| +		if (dest_if_count == max_dest_cnt) { | ||||
| +			read_unlock(&mrt_lock); | ||||
| +			rcu_read_unlock(); | ||||
| +			return -EINVAL; | ||||
| +		} | ||||
| + | ||||
| +		if (!VIF_EXISTS(mrt, vifi)) { | ||||
| +			read_unlock(&mrt_lock); | ||||
| +			rcu_read_unlock(); | ||||
| +			return -EINVAL; | ||||
| +		} | ||||
| + | ||||
| +		dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex; | ||||
| +		dest_if_count++; | ||||
| +	} | ||||
| +	read_unlock(&mrt_lock); | ||||
| +	rcu_read_unlock(); | ||||
| + | ||||
| +	return dest_if_count; | ||||
| +} | ||||
| +EXPORT_SYMBOL(ipmr_find_mfc_entry); | ||||
| + | ||||
| +/* ipmr_mfc_stats_update() | ||||
| + * Update the MFC/VIF statistics for offloaded flows | ||||
| + */ | ||||
| +int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group, | ||||
| +			  u64 pkts_in, u64 bytes_in, | ||||
| +			  u64 pkts_out, u64 bytes_out) | ||||
| +{ | ||||
| +	int vif, vifi; | ||||
| +	struct mr_table *mrt; | ||||
| +	struct mfc_cache *cache; | ||||
| + | ||||
| +	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | ||||
| +	if (!mrt) | ||||
| +		return -ENOENT; | ||||
| + | ||||
| +	rcu_read_lock(); | ||||
| +	cache = ipmr_cache_find(mrt, origin, group); | ||||
| +	if (!cache) { | ||||
| +		rcu_read_unlock(); | ||||
| +		return -ENOENT; | ||||
| +	} | ||||
| + | ||||
| +	vif = cache->_c.mfc_parent; | ||||
| + | ||||
| +	read_lock(&mrt_lock); | ||||
| +	if (!VIF_EXISTS(mrt, vif)) { | ||||
| +		read_unlock(&mrt_lock); | ||||
| +		rcu_read_unlock(); | ||||
| +		return -EINVAL; | ||||
| +	} | ||||
| + | ||||
| +	mrt->vif_table[vif].pkt_in += pkts_in; | ||||
| +	mrt->vif_table[vif].bytes_in += bytes_in; | ||||
| +	cache->_c.mfc_un.res.pkt  += pkts_out; | ||||
| +	cache->_c.mfc_un.res.bytes += bytes_out; | ||||
| + | ||||
| +	for (vifi = cache->_c.mfc_un.res.minvif; | ||||
| +			vifi < cache->_c.mfc_un.res.maxvif; vifi++) { | ||||
| +		if ((cache->_c.mfc_un.res.ttls[vifi] > 0) && | ||||
| +		    (cache->_c.mfc_un.res.ttls[vifi] < 255)) { | ||||
| +			if (!VIF_EXISTS(mrt, vifi)) { | ||||
| +				read_unlock(&mrt_lock); | ||||
| +				rcu_read_unlock(); | ||||
| +				return -EINVAL; | ||||
| +			} | ||||
| +			mrt->vif_table[vifi].pkt_out += pkts_out; | ||||
| +			mrt->vif_table[vifi].bytes_out += bytes_out; | ||||
| +		} | ||||
| +	} | ||||
| +	read_unlock(&mrt_lock); | ||||
| +	rcu_read_unlock(); | ||||
| + | ||||
| +	return 0; | ||||
| +} | ||||
| +EXPORT_SYMBOL(ipmr_mfc_stats_update); | ||||
| +/* QCA ECM qca-mcs support - End */ | ||||
| + | ||||
|  static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = { | ||||
|  	.family		= RTNL_FAMILY_IPMR, | ||||
|  	.rule_size	= sizeof(struct ipmr_rule), | ||||
| @@ -1185,6 +1416,11 @@ static int ipmr_mfc_delete(struct mr_tab | ||||
|  	mroute_netlink_event(mrt, c, RTM_DELROUTE); | ||||
|  	mr_cache_put(&c->_c); | ||||
|   | ||||
| +	/* QCA ECM qca-mcs support - Start */ | ||||
| +	/* Inform offload modules of the delete event */ | ||||
| +	ipmr_sync_entry_delete(c->mfc_origin, c->mfc_mcastgrp); | ||||
| +	/* QCA ECM qca-mcs support - End */ | ||||
| + | ||||
|  	return 0; | ||||
|  } | ||||
|   | ||||
| @@ -1214,6 +1450,12 @@ static int ipmr_mfc_add(struct net *net, | ||||
|  		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c, | ||||
|  					      mrt->id); | ||||
|  		mroute_netlink_event(mrt, c, RTM_NEWROUTE); | ||||
| + | ||||
| +		/* QCA ECM qca-mcs support - Start */ | ||||
| +		/* Inform offload modules of the update event */ | ||||
| +		ipmr_sync_entry_update(mrt, c); | ||||
| +		/* QCA ECM qca-mcs support - End */ | ||||
| + | ||||
|  		return 0; | ||||
|  	} | ||||
|   | ||||
| @@ -1274,6 +1516,7 @@ static void mroute_clean_tables(struct m | ||||
|  	struct net *net = read_pnet(&mrt->net); | ||||
|  	struct mr_mfc *c, *tmp; | ||||
|  	struct mfc_cache *cache; | ||||
| +	u32 origin, group; /* QCA ECM qca-mcs support */ | ||||
|  	LIST_HEAD(list); | ||||
|  	int i; | ||||
|   | ||||
| @@ -1298,10 +1541,19 @@ static void mroute_clean_tables(struct m | ||||
|  			rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params); | ||||
|  			list_del_rcu(&c->list); | ||||
|  			cache = (struct mfc_cache *)c; | ||||
| +			/* QCA ECM qca-mcs support - Start */ | ||||
| +			origin = cache->mfc_origin; | ||||
| +			group = cache->mfc_mcastgrp; | ||||
| +			/* QCA ECM qca-mcs support - End */ | ||||
|  			call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache, | ||||
|  						      mrt->id); | ||||
|  			mroute_netlink_event(mrt, cache, RTM_DELROUTE); | ||||
|  			mr_cache_put(c); | ||||
| + | ||||
| +			/* QCA ECM qca-mcs support - Start */ | ||||
| +			/* Inform offload modules of the delete event */ | ||||
| +			ipmr_sync_entry_delete(origin, group); | ||||
| +			/* QCA ECM qca-mcs support - End */ | ||||
|  		} | ||||
|  	} | ||||
|   | ||||
| --- a/net/ipv6/ip6mr.c | ||||
| +++ b/net/ipv6/ip6mr.c | ||||
| @@ -95,6 +95,17 @@ static int ip6mr_rtm_dumproute(struct sk | ||||
|  static void mroute_clean_tables(struct mr_table *mrt, int flags); | ||||
|  static void ipmr_expire_process(struct timer_list *t); | ||||
|   | ||||
| +/* QCA qca-mcs support - Start */ | ||||
| +/* Spinlock for offload */ | ||||
| +static DEFINE_SPINLOCK(lock); | ||||
| + | ||||
| +static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt, | ||||
| +					   const struct in6_addr *origin, | ||||
| +					   const struct in6_addr *mcastgrp); | ||||
| +static ip6mr_mfc_event_offload_callback_t __rcu | ||||
| +				ip6mr_mfc_event_offload_callback; | ||||
| +/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | ||||
|  #define ip6mr_for_each_table(mrt, net) \ | ||||
|  	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \ | ||||
| @@ -380,6 +391,227 @@ static struct mr_table_ops ip6mr_mr_tabl | ||||
|  	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any, | ||||
|  }; | ||||
|   | ||||
| +/* QCA qca-mcs support - Start */ | ||||
| +/* ip6mr_sync_entry_update() | ||||
| + * Call the registered offload callback to report an update to a multicast | ||||
| + * route entry. The callback receives the list of destination interfaces and | ||||
| + * the interface count | ||||
| + */ | ||||
| +static void ip6mr_sync_entry_update(struct mr_table *mrt, | ||||
| +				    struct mfc6_cache *cache) | ||||
| +{ | ||||
| +	int vifi, dest_if_count = 0; | ||||
| +	u32 dest_dev[MAXMIFS]; | ||||
| +	struct in6_addr mc_origin, mc_group; | ||||
| +	ip6mr_mfc_event_offload_callback_t offload_update_cb_f; | ||||
| + | ||||
| +	memset(dest_dev, 0, sizeof(dest_dev)); | ||||
| + | ||||
| +	read_lock(&mrt_lock); | ||||
| + | ||||
| +	for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) { | ||||
| +		if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) && | ||||
| +		      (cache->_c.mfc_un.res.ttls[vifi] < 255))) { | ||||
| +			continue; | ||||
| +		} | ||||
| + | ||||
| +		if (dest_if_count == MAXMIFS) { | ||||
| +			read_unlock(&mrt_lock); | ||||
| +			return; | ||||
| +		} | ||||
| + | ||||
| +		if (!VIF_EXISTS(mrt, vifi)) { | ||||
| +			read_unlock(&mrt_lock); | ||||
| +			return; | ||||
| +		} | ||||
| + | ||||
| +		dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex; | ||||
| +		dest_if_count++; | ||||
| +	} | ||||
| + | ||||
| +	memcpy(&mc_origin, &cache->mf6c_origin, sizeof(struct in6_addr)); | ||||
| +	memcpy(&mc_group, &cache->mf6c_mcastgrp, sizeof(struct in6_addr)); | ||||
| +	read_unlock(&mrt_lock); | ||||
| + | ||||
| +	rcu_read_lock(); | ||||
| +	offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback); | ||||
| + | ||||
| +	if (!offload_update_cb_f) { | ||||
| +		rcu_read_unlock(); | ||||
| +		return; | ||||
| +	} | ||||
| + | ||||
| +	offload_update_cb_f(&mc_group, &mc_origin, dest_if_count, dest_dev, | ||||
| +			    IP6MR_MFC_EVENT_UPDATE); | ||||
| +	rcu_read_unlock(); | ||||
| +} | ||||
| + | ||||
| +/* ip6mr_sync_entry_delete() | ||||
| + * Call the registered offload callback to inform of a multicast route entry | ||||
| + * delete event | ||||
| + */ | ||||
| +static void ip6mr_sync_entry_delete(struct in6_addr *mc_origin, | ||||
| +				    struct in6_addr *mc_group) | ||||
| +{ | ||||
| +	ip6mr_mfc_event_offload_callback_t offload_update_cb_f; | ||||
| + | ||||
| +	rcu_read_lock(); | ||||
| +	offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback); | ||||
| + | ||||
| +	if (!offload_update_cb_f) { | ||||
| +		rcu_read_unlock(); | ||||
| +		return; | ||||
| +	} | ||||
| + | ||||
| +	offload_update_cb_f(mc_group, mc_origin, 0, NULL, | ||||
| +			    IP6MR_MFC_EVENT_DELETE); | ||||
| +	rcu_read_unlock(); | ||||
| +} | ||||
| + | ||||
| +/* ip6mr_register_mfc_event_offload_callback() | ||||
| + * Register the IPv6 multicast update callback for offload modules | ||||
| + */ | ||||
| +bool ip6mr_register_mfc_event_offload_callback( | ||||
| +		ip6mr_mfc_event_offload_callback_t mfc_offload_cb) | ||||
| +{ | ||||
| +	ip6mr_mfc_event_offload_callback_t offload_update_cb_f; | ||||
| + | ||||
| +	rcu_read_lock(); | ||||
| +	offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback); | ||||
| + | ||||
| +	if (offload_update_cb_f) { | ||||
| +		rcu_read_unlock(); | ||||
| +		return false; | ||||
| +	} | ||||
| +	rcu_read_unlock(); | ||||
| + | ||||
| +	spin_lock(&lock); | ||||
| +	rcu_assign_pointer(ip6mr_mfc_event_offload_callback, mfc_offload_cb); | ||||
| +	spin_unlock(&lock); | ||||
| +	synchronize_rcu(); | ||||
| +	return true; | ||||
| +} | ||||
| +EXPORT_SYMBOL(ip6mr_register_mfc_event_offload_callback); | ||||
| + | ||||
| +/* ip6mr_unregister_mfc_event_offload_callback() | ||||
| + * De-register the IPv6 multicast update callback for offload modules | ||||
| + */ | ||||
| +void ip6mr_unregister_mfc_event_offload_callback(void) | ||||
| +{ | ||||
| +	spin_lock(&lock); | ||||
| +	rcu_assign_pointer(ip6mr_mfc_event_offload_callback, NULL); | ||||
| +	spin_unlock(&lock); | ||||
| +	synchronize_rcu(); | ||||
| +} | ||||
| +EXPORT_SYMBOL(ip6mr_unregister_mfc_event_offload_callback); | ||||
| + | ||||
| +/* ip6mr_find_mfc_entry() | ||||
| + * Return the destination interface list for a particular multicast flow, and | ||||
| + * the number of interfaces in the list | ||||
| + */ | ||||
| +int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin, | ||||
| +			 struct in6_addr *group, u32 max_dest_cnt, | ||||
| +			 u32 dest_dev[]) | ||||
| +{ | ||||
| +	int vifi, dest_if_count = 0; | ||||
| +	struct mr_table *mrt; | ||||
| +	struct mfc6_cache *cache; | ||||
| + | ||||
| +	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); | ||||
| +	if (!mrt) | ||||
| +		return -ENOENT; | ||||
| + | ||||
| +	read_lock(&mrt_lock); | ||||
| +	cache = ip6mr_cache_find(mrt, origin, group); | ||||
| +	if (!cache) { | ||||
| +		read_unlock(&mrt_lock); | ||||
| +		return -ENOENT; | ||||
| +	} | ||||
| + | ||||
| +	for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) { | ||||
| +		if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) && | ||||
| +		      (cache->_c.mfc_un.res.ttls[vifi] < 255))) { | ||||
| +			continue; | ||||
| +		} | ||||
| + | ||||
| +		/* We have another valid destination interface entry. Check if | ||||
| +		 * the number of the destination interfaces for the route is | ||||
| +		 * exceeding the size of the array given to us | ||||
| +		 */ | ||||
| +		if (dest_if_count == max_dest_cnt) { | ||||
| +			read_unlock(&mrt_lock); | ||||
| +			return -EINVAL; | ||||
| +		} | ||||
| + | ||||
| +		if (!VIF_EXISTS(mrt, vifi)) { | ||||
| +			read_unlock(&mrt_lock); | ||||
| +			return -EINVAL; | ||||
| +		} | ||||
| + | ||||
| +		dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex; | ||||
| +		dest_if_count++; | ||||
| +	} | ||||
| +	read_unlock(&mrt_lock); | ||||
| + | ||||
| +	return dest_if_count; | ||||
| +} | ||||
| +EXPORT_SYMBOL(ip6mr_find_mfc_entry); | ||||
| + | ||||
| +/* ip6mr_mfc_stats_update() | ||||
| + * Update the MFC/VIF statistics for offloaded flows | ||||
| + */ | ||||
| +int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin, | ||||
| +			   struct in6_addr *group, u64 pkts_in, | ||||
| +			   u64 bytes_in, uint64_t pkts_out, | ||||
| +			   u64 bytes_out) | ||||
| +{ | ||||
| +	int vif, vifi; | ||||
| +	struct mr_table *mrt; | ||||
| +	struct mfc6_cache *cache; | ||||
| + | ||||
| +	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); | ||||
| + | ||||
| +	if (!mrt) | ||||
| +		return -ENOENT; | ||||
| + | ||||
| +	read_lock(&mrt_lock); | ||||
| +	cache = ip6mr_cache_find(mrt, origin, group); | ||||
| +	if (!cache) { | ||||
| +		read_unlock(&mrt_lock); | ||||
| +		return -ENOENT; | ||||
| +	} | ||||
| + | ||||
| +	vif = cache->_c.mfc_parent; | ||||
| + | ||||
| +	if (!VIF_EXISTS(mrt, vif)) { | ||||
| +		read_unlock(&mrt_lock); | ||||
| +		return -EINVAL; | ||||
| +	} | ||||
| + | ||||
| +	mrt->vif_table[vif].pkt_in += pkts_in; | ||||
| +	mrt->vif_table[vif].bytes_in += bytes_in; | ||||
| +	cache->_c.mfc_un.res.pkt += pkts_out; | ||||
| +	cache->_c.mfc_un.res.bytes += bytes_out; | ||||
| + | ||||
| +	for (vifi = cache->_c.mfc_un.res.minvif; | ||||
| +			vifi < cache->_c.mfc_un.res.maxvif; vifi++) { | ||||
| +		if ((cache->_c.mfc_un.res.ttls[vifi] > 0) && | ||||
| +		    (cache->_c.mfc_un.res.ttls[vifi] < 255)) { | ||||
| +			if (!VIF_EXISTS(mrt, vifi)) { | ||||
| +				read_unlock(&mrt_lock); | ||||
| +				return -EINVAL; | ||||
| +			} | ||||
| +			mrt->vif_table[vifi].pkt_out += pkts_out; | ||||
| +			mrt->vif_table[vifi].bytes_out += bytes_out; | ||||
| +		} | ||||
| +	} | ||||
| + | ||||
| +	read_unlock(&mrt_lock); | ||||
| +	return 0; | ||||
| +} | ||||
| +EXPORT_SYMBOL(ip6mr_mfc_stats_update); | ||||
| +/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  static struct mr_table *ip6mr_new_table(struct net *net, u32 id) | ||||
|  { | ||||
|  	struct mr_table *mrt; | ||||
| @@ -1215,6 +1447,7 @@ static int ip6mr_mfc_delete(struct mr_ta | ||||
|  			    int parent) | ||||
|  { | ||||
|  	struct mfc6_cache *c; | ||||
| +	struct in6_addr mc_origin, mc_group; /* QCA qca-mcs support */ | ||||
|   | ||||
|  	/* The entries are added/deleted only under RTNL */ | ||||
|  	rcu_read_lock(); | ||||
| @@ -1223,6 +1456,12 @@ static int ip6mr_mfc_delete(struct mr_ta | ||||
|  	rcu_read_unlock(); | ||||
|  	if (!c) | ||||
|  		return -ENOENT; | ||||
| + | ||||
| +	/* QCA qca-mcs support - Start */ | ||||
| +	memcpy(&mc_origin, &c->mf6c_origin, sizeof(struct in6_addr)); | ||||
| +	memcpy(&mc_group, &c->mf6c_mcastgrp, sizeof(struct in6_addr)); | ||||
| +	/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params); | ||||
|  	list_del_rcu(&c->_c.list); | ||||
|   | ||||
| @@ -1230,6 +1469,12 @@ static int ip6mr_mfc_delete(struct mr_ta | ||||
|  				       FIB_EVENT_ENTRY_DEL, c, mrt->id); | ||||
|  	mr6_netlink_event(mrt, c, RTM_DELROUTE); | ||||
|  	mr_cache_put(&c->_c); | ||||
| + | ||||
| +	/* QCA qca-mcs support - Start */ | ||||
| +	/* Inform offload modules of the delete event */ | ||||
| +	ip6mr_sync_entry_delete(&mc_origin, &mc_group); | ||||
| +	/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  	return 0; | ||||
|  } | ||||
|   | ||||
| @@ -1439,6 +1684,12 @@ static int ip6mr_mfc_add(struct net *net | ||||
|  		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, | ||||
|  					       c, mrt->id); | ||||
|  		mr6_netlink_event(mrt, c, RTM_NEWROUTE); | ||||
| + | ||||
| +		/* QCA qca-mcs support - Start */ | ||||
| +		/* Inform offload modules of the update event */ | ||||
| +		ip6mr_sync_entry_update(mrt, c); | ||||
| +		/* QCA qca-mcs support - End */ | ||||
| + | ||||
|  		return 0; | ||||
|  	} | ||||
|   | ||||
| @@ -1501,6 +1752,10 @@ static int ip6mr_mfc_add(struct net *net | ||||
|   | ||||
|  static void mroute_clean_tables(struct mr_table *mrt, int flags) | ||||
|  { | ||||
| +	/* QCA qca-mcs support - Start */ | ||||
| +	struct mfc6_cache *cache; | ||||
| +	struct in6_addr mc_origin, mc_group; | ||||
| +	/* QCA qca-mcs support - End */ | ||||
|  	struct mr_mfc *c, *tmp; | ||||
|  	LIST_HEAD(list); | ||||
|  	int i; | ||||
| @@ -1523,13 +1778,23 @@ static void mroute_clean_tables(struct m | ||||
|  			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) || | ||||
|  			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC))) | ||||
|  				continue; | ||||
| +			/* QCA qca-mcs support - Start */ | ||||
| +			cache = (struct mfc6_cache *)c; | ||||
| +			memcpy(&mc_origin, &cache->mf6c_origin, sizeof(struct in6_addr)); | ||||
| +			memcpy(&mc_group, &cache->mf6c_mcastgrp, sizeof(struct in6_addr)); | ||||
| +			/* QCA qca-mcs support - End */ | ||||
|  			rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); | ||||
|  			list_del_rcu(&c->list); | ||||
|  			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), | ||||
|  						       FIB_EVENT_ENTRY_DEL, | ||||
| -						       (struct mfc6_cache *)c, mrt->id); | ||||
| -			mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); | ||||
| +						       cache, mrt->id); | ||||
| +			mr6_netlink_event(mrt, cache, RTM_DELROUTE); | ||||
|  			mr_cache_put(c); | ||||
| + | ||||
| +			/* QCA qca-mcs support - Start */ | ||||
| +			/* Inform offload modules of the delete event */ | ||||
| +			ip6mr_sync_entry_delete(&mc_origin, &mc_group); | ||||
| +			/* QCA qca-mcs support - End */ | ||||
|  		} | ||||
|  	} | ||||
|   | ||||
| @@ -0,0 +1,111 @@ | ||||
| --- a/crypto/authenc.c | ||||
| +++ b/crypto/authenc.c | ||||
| @@ -417,6 +417,8 @@ static int crypto_authenc_create(struct | ||||
|  		     enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | ||||
|  		goto err_free_inst; | ||||
|   | ||||
| +	inst->alg.base.cra_flags |= (auth_base->cra_flags | | ||||
| +				    enc->base.cra_flags) & CRYPTO_ALG_NOSUPP_SG; | ||||
|  	inst->alg.base.cra_priority = enc->base.cra_priority * 10 + | ||||
|  				      auth_base->cra_priority; | ||||
|  	inst->alg.base.cra_blocksize = enc->base.cra_blocksize; | ||||
| --- a/include/linux/crypto.h | ||||
| +++ b/include/linux/crypto.h | ||||
| @@ -101,6 +101,11 @@ | ||||
|  #define CRYPTO_NOLOAD			0x00008000 | ||||
|   | ||||
|  /* | ||||
| + * Set this flag if algorithm does not support SG list transforms | ||||
| + */ | ||||
| +#define CRYPTO_ALG_NOSUPP_SG		0x0000c000 | ||||
| + | ||||
| +/* | ||||
|   * The algorithm may allocate memory during request processing, i.e. during | ||||
|   * encryption, decryption, or hashing.  Users can request an algorithm with this | ||||
|   * flag unset if they can't handle memory allocation failures. | ||||
| --- a/net/ipv4/esp4.c | ||||
| +++ b/net/ipv4/esp4.c | ||||
| @@ -659,6 +659,7 @@ static int esp_output(struct xfrm_state | ||||
|  	struct ip_esp_hdr *esph; | ||||
|  	struct crypto_aead *aead; | ||||
|  	struct esp_info esp; | ||||
| +	bool nosupp_sg; | ||||
|   | ||||
|  	esp.inplace = true; | ||||
|   | ||||
| @@ -670,6 +671,11 @@ static int esp_output(struct xfrm_state | ||||
|  	aead = x->data; | ||||
|  	alen = crypto_aead_authsize(aead); | ||||
|   | ||||
| +	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG; | ||||
| +	if (nosupp_sg && skb_linearize(skb)) { | ||||
| +		return -ENOMEM; | ||||
| +	} | ||||
| + | ||||
|  	esp.tfclen = 0; | ||||
|  	if (x->tfcpad) { | ||||
|  		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); | ||||
| @@ -895,6 +901,7 @@ static int esp_input(struct xfrm_state * | ||||
|  	u8 *iv; | ||||
|  	struct scatterlist *sg; | ||||
|  	int err = -EINVAL; | ||||
| +	bool nosupp_sg; | ||||
|   | ||||
|  	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) | ||||
|  		goto out; | ||||
| @@ -902,6 +909,12 @@ static int esp_input(struct xfrm_state * | ||||
|  	if (elen <= 0) | ||||
|  		goto out; | ||||
|   | ||||
| +	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG; | ||||
| +	if (nosupp_sg && skb_linearize(skb)) { | ||||
| +		err = -ENOMEM; | ||||
| +		goto out; | ||||
| +	} | ||||
| + | ||||
|  	assoclen = sizeof(struct ip_esp_hdr); | ||||
|  	seqhilen = 0; | ||||
|   | ||||
| --- a/net/ipv6/esp6.c | ||||
| +++ b/net/ipv6/esp6.c | ||||
| @@ -696,6 +696,7 @@ static int esp6_output(struct xfrm_state | ||||
|  	struct ip_esp_hdr *esph; | ||||
|  	struct crypto_aead *aead; | ||||
|  	struct esp_info esp; | ||||
| +	bool nosupp_sg; | ||||
|   | ||||
|  	esp.inplace = true; | ||||
|   | ||||
| @@ -707,6 +708,11 @@ static int esp6_output(struct xfrm_state | ||||
|  	aead = x->data; | ||||
|  	alen = crypto_aead_authsize(aead); | ||||
|   | ||||
| +	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG; | ||||
| +	if (nosupp_sg && skb_linearize(skb)) { | ||||
| +		return -ENOMEM; | ||||
| +	} | ||||
| + | ||||
|  	esp.tfclen = 0; | ||||
|  	if (x->tfcpad) { | ||||
|  		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); | ||||
| @@ -938,6 +944,7 @@ static int esp6_input(struct xfrm_state | ||||
|  	__be32 *seqhi; | ||||
|  	u8 *iv; | ||||
|  	struct scatterlist *sg; | ||||
| +	bool nosupp_sg; | ||||
|   | ||||
|  	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) { | ||||
|  		ret = -EINVAL; | ||||
| @@ -949,6 +956,12 @@ static int esp6_input(struct xfrm_state | ||||
|  		goto out; | ||||
|  	} | ||||
|   | ||||
| +	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG; | ||||
| +	if (nosupp_sg && skb_linearize(skb)) { | ||||
| +		ret = -ENOMEM; | ||||
| +		goto out; | ||||
| +	} | ||||
| + | ||||
|  	assoclen = sizeof(struct ip_esp_hdr); | ||||
|  	seqhilen = 0; | ||||
|   | ||||
| @@ -0,0 +1,24 @@ | ||||
| --- a/arch/arm/boot/dts/qcom-ipq8064.dtsi | ||||
| +++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi | ||||
| @@ -1513,7 +1513,6 @@ | ||||
|  			qcom,load-addr = <0x40000000>; | ||||
|  			qcom,turbo-frequency; | ||||
|   | ||||
| -			qcom,bridge-enabled; | ||||
|  			qcom,gre-enabled; | ||||
|  			qcom,gre-redir-enabled; | ||||
|  			qcom,gre_tunnel_enabled; | ||||
| @@ -1532,12 +1531,10 @@ | ||||
|  			qcom,vlan-enabled; | ||||
|  			qcom,wlan-dataplane-offload-enabled; | ||||
|  			qcom,wlanredirect-enabled; | ||||
| -			qcom,pxvlan-enabled; | ||||
|  			qcom,vxlan-enabled; | ||||
|  			qcom,match-enabled; | ||||
|  			qcom,mirror-enabled; | ||||
| -			qcom,rmnet-enabled; | ||||
| -			qcom,clmap-enabled; | ||||
| +			qcom,tstamp-enabled; | ||||
|  		}; | ||||
|   | ||||
|  		nss1: nss@40800000 { | ||||
| @@ -0,0 +1,160 @@ | ||||
| --- a/include/linux/if_bridge.h | ||||
| +++ b/include/linux/if_bridge.h | ||||
| @@ -198,7 +198,6 @@ extern struct net_device *br_port_dev_ge | ||||
|  					  unsigned char *addr, | ||||
|  					  struct sk_buff *skb, | ||||
|  					  unsigned int cookie); | ||||
| -extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr); | ||||
|  extern void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid); | ||||
|  extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev, | ||||
|  						     const char *addr, | ||||
| --- a/net/bridge/br_fdb.c | ||||
| +++ b/net/bridge/br_fdb.c | ||||
| @@ -1465,26 +1465,6 @@ void br_fdb_clear_offload(const struct n | ||||
|  EXPORT_SYMBOL_GPL(br_fdb_clear_offload); | ||||
|   | ||||
|  /* QCA NSS ECM support - Start */ | ||||
| -/* Refresh FDB entries for bridge packets being forwarded by offload engines */ | ||||
| -void br_refresh_fdb_entry(struct net_device *dev, const char *addr) | ||||
| -{ | ||||
| -	struct net_bridge_port *p = br_port_get_rcu(dev); | ||||
| - | ||||
| -	if (!p || p->state == BR_STATE_DISABLED) | ||||
| -		return; | ||||
| - | ||||
| -	if (!is_valid_ether_addr(addr)) { | ||||
| -		pr_info("bridge: Attempt to refresh with invalid ether address %pM\n", | ||||
| -			addr); | ||||
| -		return; | ||||
| -	} | ||||
| - | ||||
| -	rcu_read_lock(); | ||||
| -	br_fdb_update(p->br, p, addr, 0, true); | ||||
| -	rcu_read_unlock(); | ||||
| -} | ||||
| -EXPORT_SYMBOL_GPL(br_refresh_fdb_entry); | ||||
| - | ||||
|  /* Update timestamp of FDB entries for bridge packets being forwarded by offload engines */ | ||||
|  void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid) | ||||
|  { | ||||
| --- a/drivers/net/ppp/ppp_generic.c | ||||
| +++ b/drivers/net/ppp/ppp_generic.c | ||||
| @@ -3772,34 +3772,6 @@ int ppp_channel_get_proto_version(struct | ||||
|  } | ||||
|  EXPORT_SYMBOL(ppp_channel_get_proto_version); | ||||
|   | ||||
| -/* ppp_channel_hold() | ||||
| - *	Call this to hold a channel. | ||||
| - * | ||||
| - * Returns true on success or false if the hold could not happen. | ||||
| - * | ||||
| - * NOTE: chan must be protected against destruction during this call - | ||||
| - * either by correct locking etc. or because you already have an implicit | ||||
| - * or explicit hold to the channel already and this is an additional hold. | ||||
| - */ | ||||
| -bool ppp_channel_hold(struct ppp_channel *chan) | ||||
| -{ | ||||
| -	if (!chan->ops->hold) | ||||
| -		return false; | ||||
| - | ||||
| -	chan->ops->hold(chan); | ||||
| -	return true; | ||||
| -} | ||||
| -EXPORT_SYMBOL(ppp_channel_hold); | ||||
| - | ||||
| -/* ppp_channel_release() | ||||
| - *	Call this to release a hold you have upon a channel | ||||
| - */ | ||||
| -void ppp_channel_release(struct ppp_channel *chan) | ||||
| -{ | ||||
| -	chan->ops->release(chan); | ||||
| -} | ||||
| -EXPORT_SYMBOL(ppp_channel_release); | ||||
| - | ||||
|  /* Check if ppp xmit lock is on hold */ | ||||
|  bool ppp_is_xmit_locked(struct net_device *dev) | ||||
|  { | ||||
| --- a/include/linux/ppp_channel.h | ||||
| +++ b/include/linux/ppp_channel.h | ||||
| @@ -99,12 +99,6 @@ extern int ppp_channel_get_protocol(stru | ||||
|  /* Call this get protocol version */ | ||||
|  extern int ppp_channel_get_proto_version(struct ppp_channel *); | ||||
|   | ||||
| -/* Call this to hold a channel */ | ||||
| -extern bool ppp_channel_hold(struct ppp_channel *); | ||||
| - | ||||
| -/* Call this to release a hold you have upon a channel */ | ||||
| -extern void ppp_channel_release(struct ppp_channel *); | ||||
| - | ||||
|  /* Release hold on PPP channels */ | ||||
|  extern void ppp_release_channels(struct ppp_channel *channels[], | ||||
|  				 unsigned int chan_sz); | ||||
| --- a/drivers/net/ppp/pptp.c | ||||
| +++ b/drivers/net/ppp/pptp.c | ||||
| @@ -92,32 +92,6 @@ static int lookup_chan_dst(u16 call_id, | ||||
|  	return i < MAX_CALLID; | ||||
|  } | ||||
|   | ||||
| -/* Search a pptp session based on local call id, local and remote ip address */ | ||||
| -static int lookup_session_src(struct pptp_opt *opt, u16 call_id, __be32 daddr, __be32 saddr) | ||||
| -{ | ||||
| -	struct pppox_sock *sock; | ||||
| -	int i = 1; | ||||
| - | ||||
| -	rcu_read_lock(); | ||||
| -	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) { | ||||
| -		sock = rcu_dereference(callid_sock[i]); | ||||
| -		if (!sock) | ||||
| -			continue; | ||||
| - | ||||
| -		if (sock->proto.pptp.src_addr.call_id == call_id && | ||||
| -		    sock->proto.pptp.dst_addr.sin_addr.s_addr == daddr && | ||||
| -		    sock->proto.pptp.src_addr.sin_addr.s_addr == saddr) { | ||||
| -			sock_hold(sk_pppox(sock)); | ||||
| -			memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt)); | ||||
| -			sock_put(sk_pppox(sock)); | ||||
| -			rcu_read_unlock(); | ||||
| -			return 0; | ||||
| -		} | ||||
| -	} | ||||
| -	rcu_read_unlock(); | ||||
| -	return -EINVAL; | ||||
| -} | ||||
| - | ||||
|  /* Search a pptp session based on peer call id and peer ip address */ | ||||
|  static int lookup_session_dst(struct pptp_opt *opt, u16 call_id, __be32 d_addr) | ||||
|  { | ||||
| @@ -748,20 +722,6 @@ int pptp_session_find(struct pptp_opt *o | ||||
|  } | ||||
|  EXPORT_SYMBOL(pptp_session_find); | ||||
|   | ||||
| -/* pptp_session_find_by_src_callid() | ||||
| - *	Search and return a PPTP session info based on src callid and IP | ||||
| - *	address. The function accepts the parameters in network byte order. | ||||
| - */ | ||||
| -int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id, | ||||
| -		      __be32 daddr, __be32 saddr) | ||||
| -{ | ||||
| -	if (!opt) | ||||
| -		return -EINVAL; | ||||
| - | ||||
| -	return lookup_session_src(opt, ntohs(src_call_id), daddr, saddr); | ||||
| -} | ||||
| -EXPORT_SYMBOL(pptp_session_find_by_src_callid); | ||||
| - | ||||
|   /* Function to change the offload mode true/false for a PPTP session */ | ||||
|  static int pptp_set_offload_mode(bool accel_mode, | ||||
|  				 __be16 peer_call_id, __be32 peer_ip_addr) | ||||
| --- a/include/linux/if_pppox.h | ||||
| +++ b/include/linux/if_pppox.h | ||||
| @@ -111,10 +111,6 @@ typedef int (*pptp_gre_seq_offload_callb | ||||
|  extern int pppoe_channel_addressing_get(struct ppp_channel *chan, | ||||
|  					 struct pppoe_opt *addressing); | ||||
|   | ||||
| -/* Lookup PPTP session info and return PPTP session using sip, dip and local call id */ | ||||
| -extern int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id, | ||||
| -			 __be32 daddr, __be32 saddr); | ||||
| - | ||||
|  /* Lookup PPTP session info and return PPTP session using dip and peer call id */ | ||||
|  extern int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id, | ||||
|  			     __be32 peer_ip_addr); | ||||
		Reference in New Issue
	
	Block a user
	 ACwifidude
					ACwifidude