ipq806x: NSS Hardware Offloading qdisc patches

Author: ACwifidude (2022-09-30 11:21:00 -05:00)
Committed by: Lucas Asvio
Parent: 4cabdd199b
Commit: c98e2dc975
6 changed files with 1216 additions and 0 deletions


@@ -0,0 +1,44 @@
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -687,6 +687,7 @@ typedef unsigned char *sk_buff_data_t;
* @offload_fwd_mark: Packet was L2-forwarded in hardware
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
+ * @tc_skip_classify_offload: do not classify packet. set by offload IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
@@ -902,6 +903,8 @@ struct sk_buff {
#ifdef CONFIG_NET_CLS_ACT
__u8 tc_skip_classify:1;
__u8 tc_at_ingress:1;
+ __u8 tc_skip_classify_offload:1;
+ __u16 tc_verd_qca_nss; /* QCA NSS Qdisc Support */
#endif
__u8 redirected:1;
#ifdef CONFIG_NET_REDIRECT
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -136,6 +136,7 @@ enum tca_id {
TCA_ID_MPLS,
TCA_ID_CT,
TCA_ID_GATE,
+ TCA_ID_MIRRED_NSS, /* QCA NSS Qdisc IGS Support */
/* other actions go here */
__TCA_ID_MAX = 255
};
@@ -776,4 +777,14 @@ enum {
TCF_EM_OPND_LT
};
+/* QCA NSS Qdisc Support - Start */
+#define _TC_MAKE32(x) ((x))
+#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n))
+
+#define TC_NCLS _TC_MAKEMASK1(8)
+#define TC_NCLS_NSS _TC_MAKEMASK1(12)
+#define SET_TC_NCLS_NSS(v) ( TC_NCLS_NSS | ((v) & ~TC_NCLS_NSS))
+#define CLR_TC_NCLS_NSS(v) ( (v) & ~TC_NCLS_NSS)
+/* QCA NSS Qdisc Support - End */
+
#endif
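
The TC_NCLS_NSS helpers above follow the old TC_NCLS convention, with bit 12 reserved for the NSS path. A minimal sketch of how an IGS client might mark and consume the bit via the tc_verd_qca_nss field added to struct sk_buff (the nss_igs_* helpers are illustrative, not part of the patch, and assume CONFIG_NET_CLS_ACT):

#include <linux/skbuff.h>
#include <uapi/linux/pkt_cls.h>

/* Mark an skb so it is not reclassified after the offload bounce. */
static inline void nss_igs_set_ncls(struct sk_buff *skb)
{
	skb->tc_verd_qca_nss = SET_TC_NCLS_NSS(skb->tc_verd_qca_nss);
}

/* Test-and-clear: returns true exactly once per marked skb. */
static inline bool nss_igs_test_and_clear_ncls(struct sk_buff *skb)
{
	if (!(skb->tc_verd_qca_nss & TC_NCLS_NSS))
		return false;
	skb->tc_verd_qca_nss = CLR_TC_NCLS_NSS(skb->tc_verd_qca_nss);
	return true;
}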


@@ -0,0 +1,442 @@
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -17,6 +17,7 @@ struct timer_list {
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
+ unsigned long cust_data;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -125,6 +125,31 @@ resched:
}
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats)
+{
+ struct ifb_dev_private *dp;
+ struct ifb_q_private *txp;
+
+ if (!dev || !offload_stats) {
+ return;
+ }
+
+ if (!(dev->priv_flags_ext & IFF_EXT_IFB)) {
+ return;
+ }
+
+ dp = netdev_priv(dev);
+ txp = dp->tx_private;
+
+ u64_stats_update_begin(&txp->rsync);
+ txp->rx_packets += offload_stats->rx_packets;
+ txp->rx_bytes += offload_stats->rx_bytes;
+ txp->tx_packets += offload_stats->tx_packets;
+ txp->tx_bytes += offload_stats->tx_bytes;
+ u64_stats_update_end(&txp->rsync);
+}
+EXPORT_SYMBOL(ifb_update_offload_stats);
+
static void ifb_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -224,6 +249,7 @@ static void ifb_setup(struct net_device
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags_ext |= IFF_EXT_IFB; /* Mark the device as an IFB device. */
netif_keep_dst(dev);
eth_hw_addr_random(dev);
dev->needs_free_netdev = true;
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4749,6 +4749,15 @@ void dev_uc_flush(struct net_device *dev
void dev_uc_init(struct net_device *dev);
/**
+ * ifb_update_offload_stats - Update the IFB interface stats
* @dev: IFB device whose stats are to be updated
* @offload_stats: per-CPU stats structure with the deltas to add
+ *
+ * Allows update of IFB stats when flows are offloaded to an accelerator.
+ **/
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats);
+
+/**
* __dev_uc_sync - Synchonize device's unicast list
* @dev: device to sync
* @sync: function to call if address should be added
@@ -5298,6 +5307,11 @@ static inline bool netif_is_failover_sla
return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
+static inline bool netif_is_ifb_dev(const struct net_device *dev)
+{
+ return dev->priv_flags_ext & IFF_EXT_IFB;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1265,4 +1265,248 @@ enum {
#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+/* QCA NSS Clients Support - Start */
+enum {
+ TCA_NSS_ACCEL_MODE_NSS_FW,
+ TCA_NSS_ACCEL_MODE_PPE,
+ TCA_NSS_ACCEL_MODE_MAX
+};
+
+/* NSSFIFO section */
+
+enum {
+ TCA_NSSFIFO_UNSPEC,
+ TCA_NSSFIFO_PARMS,
+ __TCA_NSSFIFO_MAX
+};
+
+#define TCA_NSSFIFO_MAX (__TCA_NSSFIFO_MAX - 1)
+
+struct tc_nssfifo_qopt {
+ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRED section */
+
+enum {
+ TCA_NSSWRED_UNSPEC,
+ TCA_NSSWRED_PARMS,
+ __TCA_NSSWRED_MAX
+};
+
+#define TCA_NSSWRED_MAX (__TCA_NSSWRED_MAX - 1)
+#define NSSWRED_CLASS_MAX 6
+struct tc_red_alg_parameter {
+ __u32 min; /* qlen_avg < min: pkts are all enqueued */
+ __u32 max; /* qlen_avg > max: pkts are all dropped */
+ __u32 probability;/* Drop probability at qlen_avg = max */
__u32 exp_weight_factor;/* exp_weight_factor used to calculate qlen_avg */
+};
+
+struct tc_nsswred_traffic_class {
+ __u32 limit; /* Queue length */
+ __u32 weight_mode_value; /* Weight mode value */
+ struct tc_red_alg_parameter rap;/* Parameters for RED alg */
+};
+
+/*
+ * Weight modes for WRED
+ */
+enum tc_nsswred_weight_modes {
+ TC_NSSWRED_WEIGHT_MODE_DSCP = 0,/* Weight mode is DSCP */
+ TC_NSSWRED_WEIGHT_MODES, /* Must be last */
+};
+
+struct tc_nsswred_qopt {
+ __u32 limit; /* Queue length */
+ enum tc_nsswred_weight_modes weight_mode;
+ /* Weight mode */
+ __u32 traffic_classes; /* How many traffic classes: DPs */
+ __u32 def_traffic_class; /* Default traffic if no match: def_DP */
+ __u32 traffic_id; /* The traffic id to be configured: DP */
+ __u32 weight_mode_value; /* Weight mode value */
+ struct tc_red_alg_parameter rap;/* RED algorithm parameters */
+ struct tc_nsswred_traffic_class tntc[NSSWRED_CLASS_MAX];
/* Traffic settings for dumping */
+ __u8 ecn; /* Setting ECN bit or dropping */
+ __u8 set_default; /* Sets qdisc to be the default for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSCODEL section */
+
+enum {
+ TCA_NSSCODEL_UNSPEC,
+ TCA_NSSCODEL_PARMS,
+ __TCA_NSSCODEL_MAX
+};
+
+#define TCA_NSSCODEL_MAX (__TCA_NSSCODEL_MAX - 1)
+
+struct tc_nsscodel_qopt {
+ __u32 target; /* Acceptable queueing delay */
+ __u32 limit; /* Max number of packets that can be held in the queue */
+ __u32 interval; /* Monitoring interval */
+ __u32 flows; /* Number of flow buckets */
+ __u32 quantum; /* Weight (in bytes) used for DRR of flow buckets */
+ __u8 ecn; /* 0 - disable ECN, 1 - enable ECN */
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+struct tc_nsscodel_xstats {
+ __u32 peak_queue_delay; /* Peak delay experienced by a dequeued packet */
+ __u32 peak_drop_delay; /* Peak delay experienced by a dropped packet */
+};
+
+/* NSSFQ_CODEL section */
+
+struct tc_nssfq_codel_xstats {
+ __u32 new_flow_count; /* Total number of new flows seen */
+ __u32 new_flows_len; /* Current number of new flows */
+ __u32 old_flows_len; /* Current number of old flows */
+ __u32 ecn_mark; /* Number of packets marked with ECN */
+ __u32 drop_overlimit; /* Number of packets dropped due to overlimit */
+ __u32 maxpacket; /* The largest packet seen so far in the queue */
+};
+
+/* NSSTBL section */
+
+enum {
+ TCA_NSSTBL_UNSPEC,
+ TCA_NSSTBL_PARMS,
+ __TCA_NSSTBL_MAX
+};
+
+#define TCA_NSSTBL_MAX (__TCA_NSSTBL_MAX - 1)
+
+struct tc_nsstbl_qopt {
+ __u32 burst; /* Maximum burst size */
+ __u32 rate; /* Limiting rate of TBF */
+ __u32 peakrate; /* Maximum rate at which TBF is allowed to send */
__u32 mtu; /* Max size of packet, or minimum burst size */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSPRIO section */
+
+#define TCA_NSSPRIO_MAX_BANDS 256
+
+enum {
+ TCA_NSSPRIO_UNSPEC,
+ TCA_NSSPRIO_PARMS,
+ __TCA_NSSPRIO_MAX
+};
+
+#define TCA_NSSPRIO_MAX (__TCA_NSSPRIO_MAX - 1)
+
+struct tc_nssprio_qopt {
+ __u32 bands; /* Number of bands */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBF section */
+
+enum {
+ TCA_NSSBF_UNSPEC,
+ TCA_NSSBF_CLASS_PARMS,
+ TCA_NSSBF_QDISC_PARMS,
+ __TCA_NSSBF_MAX
+};
+
+#define TCA_NSSBF_MAX (__TCA_NSSBF_MAX - 1)
+
+struct tc_nssbf_class_qopt {
+ __u32 burst; /* Maximum burst size */
+ __u32 rate; /* Allowed bandwidth for this class */
+ __u32 mtu; /* MTU of the associated interface */
+ __u32 quantum; /* Quantum allocation for DRR */
+};
+
+struct tc_nssbf_qopt {
+ __u16 defcls; /* Default class value */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRR section */
+
+enum {
+ TCA_NSSWRR_UNSPEC,
+ TCA_NSSWRR_CLASS_PARMS,
+ TCA_NSSWRR_QDISC_PARMS,
+ __TCA_NSSWRR_MAX
+};
+
+#define TCA_NSSWRR_MAX (__TCA_NSSWRR_MAX - 1)
+
+struct tc_nsswrr_class_qopt {
+ __u32 quantum; /* Weight associated to this class */
+};
+
+struct tc_nsswrr_qopt {
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWFQ section */
+
+enum {
+ TCA_NSSWFQ_UNSPEC,
+ TCA_NSSWFQ_CLASS_PARMS,
+ TCA_NSSWFQ_QDISC_PARMS,
+ __TCA_NSSWFQ_MAX
+};
+
+#define TCA_NSSWFQ_MAX (__TCA_NSSWFQ_MAX - 1)
+
+struct tc_nsswfq_class_qopt {
+ __u32 quantum; /* Weight associated to this class */
+};
+
+struct tc_nsswfq_qopt {
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSHTB section */
+
+enum {
+ TCA_NSSHTB_UNSPEC,
+ TCA_NSSHTB_CLASS_PARMS,
+ TCA_NSSHTB_QDISC_PARMS,
+ __TCA_NSSHTB_MAX
+};
+
+#define TCA_NSSHTB_MAX (__TCA_NSSHTB_MAX - 1)
+
+struct tc_nsshtb_class_qopt {
+ __u32 burst; /* Allowed burst size */
+ __u32 rate; /* Allowed bandwidth for this class */
+ __u32 cburst; /* Maximum burst size */
+ __u32 crate; /* Maximum bandwidth for this class */
+ __u32 quantum; /* Quantum allocation for DRR */
+ __u32 priority; /* Priority value associated with this class */
+ __u32 overhead; /* Overhead in bytes per packet */
+};
+
+struct tc_nsshtb_qopt {
+ __u32 r2q; /* Rate to quantum ratio */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBLACKHOLE section */
+
+enum {
+ TCA_NSSBLACKHOLE_UNSPEC,
+ TCA_NSSBLACKHOLE_PARMS,
+ __TCA_NSSBLACKHOLE_MAX
+};
+
+#define TCA_NSSBLACKHOLE_MAX (__TCA_NSSBLACKHOLE_MAX - 1)
+
+struct tc_nssblackhole_qopt {
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+/* QCA NSS Clients Support - End */
#endif
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -2304,4 +2304,26 @@ static int __init pktsched_init(void)
return 0;
}
+/* QCA NSS Qdisc Support - Start */
+bool tcf_destroy(struct tcf_proto *tp, bool force)
+{
+ tp->ops->destroy(tp, force, NULL);
+ module_put(tp->ops->owner);
+ kfree_rcu(tp, rcu);
+
+ return true;
+}
+
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+{
+ struct tcf_proto *tp;
+
+ while ((tp = rtnl_dereference(*fl)) != NULL) {
+ RCU_INIT_POINTER(*fl, tp->next);
+ tcf_destroy(tp, true);
+ }
+}
+EXPORT_SYMBOL(tcf_destroy_chain);
+/* QCA NSS Qdisc Support - End */
+
subsys_initcall(pktsched_init);
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1008,7 +1008,7 @@ static void qdisc_free_cb(struct rcu_hea
qdisc_free(q);
}
-static void qdisc_destroy(struct Qdisc *qdisc)
+void qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
@@ -1031,6 +1031,7 @@ static void qdisc_destroy(struct Qdisc *
call_rcu(&qdisc->rcu, qdisc_free_cb);
}
+EXPORT_SYMBOL(qdisc_destroy);
void qdisc_put(struct Qdisc *qdisc)
{
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -87,6 +87,7 @@ struct Qdisc {
#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
+#define TCQ_F_NSS 0x1000 /* NSS qdisc flag. */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -738,6 +739,40 @@ static inline bool skb_skip_tc_classify(
return false;
}
+/*
+ * Set skb classify bit field.
+ */
+static inline void skb_set_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_skip_classify_offload = 1;
+#endif
+}
+
+/*
+ * Clear skb classify bit field.
+ */
+static inline void skb_clear_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_skip_classify_offload = 0;
+#endif
+}
+
+/*
+ * Skip skb processing if sent from ifb dev.
+ */
+static inline bool skb_skip_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (skb->tc_skip_classify_offload) {
+ skb_clear_tc_classify_offload(skb);
+ return true;
+ }
+#endif
+ return false;
+}
+
/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
@@ -1336,4 +1371,9 @@ void mini_qdisc_pair_block_init(struct m
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
+/* QCA NSS Qdisc Support - Start */
+void qdisc_destroy(struct Qdisc *qdisc);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+/* QCA NSS Qdisc Support - End */
+
#endif
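
Together these hooks let an offloading qdisc keep the software counters and classifier state consistent while traffic bypasses the kernel. A minimal sketch of a stats-sync path using the new exports, assuming the accelerator reports plain deltas (function and parameter names are illustrative):

#include <linux/netdevice.h>

static void nss_ifb_stats_sync(struct net_device *dev,
			       u64 rx_pkts, u64 rx_bytes,
			       u64 tx_pkts, u64 tx_bytes)
{
	struct pcpu_sw_netstats delta = {
		.rx_packets = rx_pkts,
		.rx_bytes   = rx_bytes,
		.tx_packets = tx_pkts,
		.tx_bytes   = tx_bytes,
	};

	/* ifb_update_offload_stats() also rejects non-IFB devices,
	 * but checking first avoids building the delta needlessly.
	 */
	if (!netif_is_ifb_dev(dev))
		return;

	ifb_update_offload_stats(dev, &delta);
}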


@@ -0,0 +1,46 @@
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -400,6 +400,31 @@ err_tlock:
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session,
+ struct l2tp_stats *stats)
+{
+ atomic_long_add(atomic_long_read(&stats->rx_packets),
+ &tunnel->stats.rx_packets);
+ atomic_long_add(atomic_long_read(&stats->rx_bytes),
+ &tunnel->stats.rx_bytes);
+ atomic_long_add(atomic_long_read(&stats->tx_packets),
+ &tunnel->stats.tx_packets);
+ atomic_long_add(atomic_long_read(&stats->tx_bytes),
+ &tunnel->stats.tx_bytes);
+
+ atomic_long_add(atomic_long_read(&stats->rx_packets),
+ &session->stats.rx_packets);
+ atomic_long_add(atomic_long_read(&stats->rx_bytes),
+ &session->stats.rx_bytes);
+ atomic_long_add(atomic_long_read(&stats->tx_packets),
+ &session->stats.tx_packets);
+ atomic_long_add(atomic_long_read(&stats->tx_bytes),
+ &session->stats.tx_bytes);
+}
+EXPORT_SYMBOL_GPL(l2tp_stats_update);
+
+
/*****************************************************************************
* Receive data handling
*****************************************************************************/
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -232,6 +232,9 @@ struct l2tp_session *l2tp_session_get_nt
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
+ struct l2tp_stats *stats);
+
/* Tunnel and session lifetime management.
* Creation of a new instance is a two-step process: create, then register.
* Destruction is triggered using the *_delete functions, and completes asynchronously.
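
A sketch of how an acceleration driver might fold hardware counters for an offloaded flow back into the kernel through the new export; the delta-building convention is an assumption for illustration:

#include <linux/atomic.h>

static void nss_l2tp_stats_sync(struct l2tp_tunnel *tunnel,
				struct l2tp_session *session,
				u64 rx_pkts, u64 rx_bytes,
				u64 tx_pkts, u64 tx_bytes)
{
	struct l2tp_stats delta = {};

	atomic_long_set(&delta.rx_packets, rx_pkts);
	atomic_long_set(&delta.rx_bytes, rx_bytes);
	atomic_long_set(&delta.tx_packets, tx_pkts);
	atomic_long_set(&delta.tx_bytes, tx_bytes);

	/* Adds the deltas to both the tunnel and session counters. */
	l2tp_stats_update(tunnel, session, &delta);
}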


@@ -0,0 +1,488 @@
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -38,6 +38,7 @@ struct pptp_opt {
u32 ack_sent, ack_recv;
u32 seq_sent, seq_recv;
int ppp_flags;
+ bool pptp_offload_mode;
};
#include <net/sock.h>
@@ -102,8 +103,40 @@ struct pppoe_channel_ops {
int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
};
+/* PPTP client callback */
+typedef int (*pptp_gre_seq_offload_callback_t)(struct sk_buff *skb,
+ struct net_device *pptp_dev);
+
/* Return PPPoE channel specific addressing information */
extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
struct pppoe_opt *addressing);
+/* Lookup PPTP session info and return PPTP session using sip, dip and local call id */
+extern int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
+ __be32 daddr, __be32 saddr);
+
+/* Lookup PPTP session info and return PPTP session using dip and peer call id */
+extern int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Return PPTP session information given the channel */
+extern void pptp_channel_addressing_get(struct pptp_opt *opt,
+ struct ppp_channel *chan);
+
+/* Enable the PPTP session offload flag */
+extern int pptp_session_enable_offload_mode(__be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Disable the PPTP session offload flag */
+extern int pptp_session_disable_offload_mode(__be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Register the PPTP GRE packets sequence number offload callback */
+extern int
+pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
+ pptp_client_cb);
+
+/* Unregister the PPTP GRE packets sequence number offload callback */
+extern void pptp_unregister_gre_seq_offload_callback(void);
+
#endif /* !(__LINUX_IF_PPPOX_H) */
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2970,6 +2970,20 @@ char *ppp_dev_name(struct ppp_channel *c
return name;
}
+/* Return the PPP net device index */
+int ppp_dev_index(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ int ifindex = 0;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ ifindex = pch->ppp->dev->ifindex;
+ read_unlock_bh(&pch->upl);
+ }
+ return ifindex;
+}
/*
* Disconnect a channel from the generic layer.
@@ -3678,6 +3692,28 @@ void ppp_update_stats(struct net_device
ppp_recv_unlock(ppp);
}
+/* Returns true if compression is enabled on the PPP device
+ */
+bool ppp_is_cp_enabled(struct net_device *dev)
+{
+ struct ppp *ppp;
+ bool flag = false;
+
+ if (!dev)
+ return false;
+
+ if (dev->type != ARPHRD_PPP)
+ return false;
+
+ ppp = netdev_priv(dev);
+ ppp_lock(ppp);
+ flag = !!(ppp->xstate & SC_COMP_RUN) || !!(ppp->rstate & SC_DECOMP_RUN);
+ ppp_unlock(ppp);
+
+ return flag;
+}
+EXPORT_SYMBOL(ppp_is_cp_enabled);
+
/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
* the device is not PPP.
*/
@@ -3869,6 +3905,7 @@ EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_dev_name);
+EXPORT_SYMBOL(ppp_dev_index);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -82,6 +82,9 @@ extern void ppp_unregister_channel(struc
/* Get the channel number for a channel */
extern int ppp_channel_index(struct ppp_channel *);
+/* Get the device index associated with a channel, or 0, if none */
+extern int ppp_dev_index(struct ppp_channel *);
+
/* Get the unit number associated with a channel, or -1 if none */
extern int ppp_unit_number(struct ppp_channel *);
@@ -114,6 +117,7 @@ extern int ppp_hold_channels(struct net_
/* Test if ppp xmit lock is locked */
extern bool ppp_is_xmit_locked(struct net_device *dev);
+bool ppp_is_cp_enabled(struct net_device *dev);
/* Test if the ppp device is a multi-link ppp device */
extern int ppp_is_multilink(struct net_device *dev);
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -49,6 +49,8 @@ static struct proto pptp_sk_proto __read
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;
+static pptp_gre_seq_offload_callback_t __rcu pptp_gre_offload_xmit_cb;
+
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
struct pppox_sock *sock;
@@ -90,6 +92,79 @@ static int lookup_chan_dst(u16 call_id,
return i < MAX_CALLID;
}
+/* Search a pptp session based on local call id, local and remote ip address */
+static int lookup_session_src(struct pptp_opt *opt, u16 call_id, __be32 daddr, __be32 saddr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.src_addr.call_id == call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == daddr &&
+ sock->proto.pptp.src_addr.sin_addr.s_addr == saddr) {
+ sock_hold(sk_pppox(sock));
+ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Search a pptp session based on peer call id and peer ip address */
+static int lookup_session_dst(struct pptp_opt *opt, u16 call_id, __be32 d_addr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.dst_addr.call_id == call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == d_addr) {
+ sock_hold(sk_pppox(sock));
+ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* If offload mode is set then this function sends all packets to the
+ * offload module instead of the network stack
+ */
+static int pptp_client_skb_xmit(struct sk_buff *skb,
+ struct net_device *pptp_dev)
+{
+ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
+ int ret;
+
+ rcu_read_lock();
+ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
+
+ if (!pptp_gre_offload_cb_f) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ ret = pptp_gre_offload_cb_f(skb, pptp_dev);
+ rcu_read_unlock();
+ return ret;
+}
+
static int add_chan(struct pppox_sock *sock,
struct pptp_addr *sa)
{
@@ -145,8 +220,11 @@ static int pptp_xmit(struct ppp_channel
struct rtable *rt;
struct net_device *tdev;
+ struct net_device *pptp_dev;
struct iphdr *iph;
int max_headroom;
+ int pptp_ifindex;
+ int ret;
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
@@ -155,7 +233,7 @@ static int pptp_xmit(struct ppp_channel
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0, IPPROTO_GRE,
- RT_TOS(0), sk->sk_bound_dev_if);
+ RT_TOS(0), 0);
if (IS_ERR(rt))
goto tx_error;
@@ -244,7 +322,32 @@ static int pptp_xmit(struct ppp_channel
ip_select_ident(net, skb, NULL);
ip_send_check(iph);
- ip_local_out(net, skb->sk, skb);
+ pptp_ifindex = ppp_dev_index(chan);
+
+ /* set incoming interface as the ppp interface */
+ if (skb->skb_iif)
+ skb->skb_iif = pptp_ifindex;
+
+ /* If the PPTP GRE seq number offload module is not enabled yet
+ * then send all PPTP GRE packets through the Linux network stack
+ */
+ if (!opt->pptp_offload_mode) {
+ ip_local_out(net, skb->sk, skb);
+ return 1;
+ }
+
+ pptp_dev = dev_get_by_index(&init_net, pptp_ifindex);
+ if (!pptp_dev)
+ goto tx_error;
+
+ /* If PPTP offload module is enabled then forward all PPTP GRE
+ * packets to PPTP GRE offload module
+ */
+ ret = pptp_client_skb_xmit(skb, pptp_dev);
+ dev_put(pptp_dev);
+ if (ret < 0)
+ goto tx_error;
+
return 1;
tx_error:
@@ -300,6 +403,13 @@ static int pptp_rcv_core(struct sock *sk
goto drop;
payload = skb->data + headersize;
+
+ /* If offload is enabled, we expect the offload module
+ * to handle PPTP GRE sequence number checks
+ */
+ if (opt->pptp_offload_mode)
+ goto allow_packet;
+
/* check for expected sequence number */
if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
@@ -357,6 +467,7 @@ static int pptp_rcv(struct sk_buff *skb)
if (po) {
skb_dst_drop(skb);
nf_reset_ct(skb);
+ skb->skb_iif = ppp_dev_index(&po->chan);
return sk_receive_skb(sk_pppox(po), skb, 0);
}
drop:
@@ -442,8 +553,7 @@ static int pptp_connect(struct socket *s
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,
- IPPROTO_GRE, RT_CONN_FLAGS(sk),
- sk->sk_bound_dev_if);
+ IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
if (IS_ERR(rt)) {
error = -EHOSTUNREACH;
goto end;
@@ -464,7 +574,7 @@ static int pptp_connect(struct socket *s
opt->dst_addr = sp->sa_addr.pptp;
sk->sk_state |= PPPOX_CONNECTED;
-
+ opt->pptp_offload_mode = false;
end:
release_sock(sk);
return error;
@@ -594,9 +704,169 @@ static int pptp_ppp_ioctl(struct ppp_cha
return err;
}
+/* pptp_channel_addressing_get()
+ * Return PPTP channel specific addressing information.
+ */
+void pptp_channel_addressing_get(struct pptp_opt *opt, struct ppp_channel *chan)
+{
+ struct sock *sk;
+ struct pppox_sock *po;
+
+ if (!opt)
+ return;
+
+ sk = (struct sock *)chan->private;
+ if (!sk)
+ return;
+
+ sock_hold(sk);
+
+ /* This is very unlikely, but check that the socket is in a connected state */
+ if (unlikely(sock_flag(sk, SOCK_DEAD) ||
+ !(sk->sk_state & PPPOX_CONNECTED))) {
+ sock_put(sk);
+ return;
+ }
+
+ po = pppox_sk(sk);
+ memcpy(opt, &po->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk);
+}
+EXPORT_SYMBOL(pptp_channel_addressing_get);
+
+/* pptp_session_find()
+ * Search and return a PPTP session info based on peer callid and IP
+ * address. The function accepts the parameters in network byte order.
+ */
+int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
+ __be32 peer_ip_addr)
+{
+ if (!opt)
+ return -EINVAL;
+
+ return lookup_session_dst(opt, ntohs(peer_call_id), peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_find);
+
+/* pptp_session_find_by_src_callid()
+ * Search and return a PPTP session info based on src callid and IP
+ * address. The function accepts the parameters in network byte order.
+ */
+int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
+ __be32 daddr, __be32 saddr)
+{
+ if (!opt)
+ return -EINVAL;
+
+ return lookup_session_src(opt, ntohs(src_call_id), daddr, saddr);
+}
+EXPORT_SYMBOL(pptp_session_find_by_src_callid);
+
+/* Function to change the offload mode (true/false) for a PPTP session */
+static int pptp_set_offload_mode(bool accel_mode,
+ __be16 peer_call_id, __be32 peer_ip_addr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.dst_addr.call_id == peer_call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == peer_ip_addr) {
+ sock_hold(sk_pppox(sock));
+ sock->proto.pptp.pptp_offload_mode = accel_mode;
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Enable the PPTP session offload flag */
+int pptp_session_enable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr)
+{
+ return pptp_set_offload_mode(true, peer_call_id, peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_enable_offload_mode);
+
+/* Disable the PPTP session offload flag */
+int pptp_session_disable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr)
+{
+ return pptp_set_offload_mode(false, peer_call_id, peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_disable_offload_mode);
+
+/* Register the offload callback function on behalf of the module which
+ * will own the sequence and acknowledgment number updates for all
+ * PPTP GRE packets. All PPTP GRE packets are then transmitted to this
+ * module after encapsulation in order to ensure the correct seq/ack
+ * fields are set in the packets before transmission. This is required
+ * when PPTP flows are offloaded to acceleration engines, in-order to
+ * ensure consistency in sequence and ack numbers between PPTP control
+ * (PPP LCP) and data packets
+ */
+int pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
+ pptp_gre_offload_cb)
+{
+ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
+
+ rcu_read_lock();
+ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
+
+ if (pptp_gre_offload_cb_f) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ rcu_assign_pointer(pptp_gre_offload_xmit_cb, pptp_gre_offload_cb);
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(pptp_register_gre_seq_offload_callback);
+
+/* Unregister the PPTP GRE packets sequence number offload callback */
+void pptp_unregister_gre_seq_offload_callback(void)
+{
+ rcu_assign_pointer(pptp_gre_offload_xmit_cb, NULL);
+}
+EXPORT_SYMBOL(pptp_unregister_gre_seq_offload_callback);
+
+/* pptp_hold_chan() */
+static void pptp_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/* pptp_release_chan() */
+static void pptp_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/* pptp_get_channel_protocol()
+ * Return the protocol type of the PPTP over PPP protocol
+ */
+static int pptp_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_PPTP;
+}
+
static const struct ppp_channel_ops pptp_chan_ops = {
.start_xmit = pptp_xmit,
.ioctl = pptp_ppp_ioctl,
+ .get_channel_protocol = pptp_get_channel_protocol,
+ .hold = pptp_hold_chan,
+ .release = pptp_release_chan,
};
static struct proto pptp_sk_proto __read_mostly = {
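
For reference, a sketch of how an NSS PPTP client module might take ownership of GRE sequence numbering through the new hooks; nss_pptp_tx_host_packet() is a hypothetical driver entry point, not a real API:

static int nss_pptp_gre_xmit(struct sk_buff *skb,
			     struct net_device *pptp_dev)
{
	/* Stamp seq/ack consistently with the offloaded flow and
	 * transmit; a negative return sends pptp_xmit() down its
	 * tx_error path.
	 */
	return nss_pptp_tx_host_packet(skb, pptp_dev); /* hypothetical */
}

static int __init nss_pptp_client_init(void)
{
	/* Fails with -1 if another module already owns the callback. */
	return pptp_register_gre_seq_offload_callback(nss_pptp_gre_xmit);
}

static void __exit nss_pptp_client_exit(void)
{
	pptp_unregister_gre_seq_offload_callback();
}

module_init(nss_pptp_client_init);
module_exit(nss_pptp_client_exit);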


@@ -0,0 +1,77 @@
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,7 @@ struct __ip6_tnl_parm {
__u8 proto; /* tunnel protocol */
__u8 encap_limit; /* encapsulation limit for tunnel */
__u8 hop_limit; /* hop limit for tunnel */
+ __u8 draft03; /* FMR using draft03 of map-e - QCA NSS Clients Support */
bool collect_md;
__be32 flowinfo; /* traffic class and flowlabel for tunnel */
__u32 flags; /* tunnel flags */
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -531,4 +531,9 @@ static inline void ip_tunnel_info_opts_s
#endif /* CONFIG_INET */
+/* QCA NSS Clients Support - Start */
+void ipip6_update_offload_stats(struct net_device *dev, void *ptr);
+void ip6_update_offload_stats(struct net_device *dev, void *ptr);
+/* QCA NSS Clients Support - End */
+
#endif /* __NET_IP_TUNNELS_H */
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2429,6 +2429,26 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* QCA NSS Clients Support - Start */
+/*
+ * Update offload stats
+ */
+void ip6_update_offload_stats(struct net_device *dev, void *ptr)
+{
+ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_packets += offload_stats->tx_packets;
+ tstats->tx_bytes += offload_stats->tx_bytes;
+ tstats->rx_packets += offload_stats->rx_packets;
+ tstats->rx_bytes += offload_stats->rx_bytes;
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL(ip6_update_offload_stats);
+/* QCA NSS Clients Support - End */
+
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1794,6 +1794,23 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* QCA NSS Clients Support - Start */
+void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
+{
+ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_packets += offload_stats->tx_packets;
+ tstats->tx_bytes += offload_stats->tx_bytes;
+ tstats->rx_packets += offload_stats->rx_packets;
+ tstats->rx_bytes += offload_stats->rx_bytes;
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL(ipip6_update_offload_stats);
+/* QCA NSS Clients Support - End */
+
static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
[IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
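
Both helpers share one shape: fold a pcpu_sw_netstats delta into CPU 0's tstats slot. A sketch of a caller that picks the right one by device type, assuming ARPHRD_TUNNEL6 for ip6_tnl devices and ARPHRD_SIT for sit devices:

#include <linux/if_arp.h>
#include <net/ip_tunnels.h>

static void nss_tunnel_stats_sync(struct net_device *dev,
				  struct pcpu_sw_netstats *hw_stats)
{
	if (dev->type == ARPHRD_TUNNEL6)	/* ip6_tunnel netdev */
		ip6_update_offload_stats(dev, hw_stats);
	else if (dev->type == ARPHRD_SIT)	/* IPv6-in-IPv4 netdev */
		ipip6_update_offload_stats(dev, hw_stats);
}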


@@ -0,0 +1,119 @@
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -90,6 +90,20 @@ struct vxlan_fdb {
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
+ATOMIC_NOTIFIER_HEAD(vxlan_fdb_notifier_list);
+
+void vxlan_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&vxlan_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL(vxlan_fdb_register_notify);
+
+void vxlan_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&vxlan_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL(vxlan_fdb_unregister_notify);
+
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
return vs->flags & VXLAN_F_COLLECT_METADATA ||
@@ -367,6 +381,7 @@ static void __vxlan_fdb_notify(struct vx
{
struct net *net = dev_net(vxlan->dev);
struct sk_buff *skb;
+ struct vxlan_fdb_event vfe;
int err = -ENOBUFS;
skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
@@ -382,6 +397,10 @@ static void __vxlan_fdb_notify(struct vx
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+ vfe.dev = vxlan->dev;
+ vfe.rdst = rd;
+ ether_addr_copy(vfe.eth_addr, fdb->eth_addr);
+ atomic_notifier_call_chain(&vxlan_fdb_notifier_list, type, (void *)&vfe);
return;
errout:
if (err < 0)
@@ -548,6 +567,18 @@ static struct vxlan_fdb *vxlan_find_mac(
return f;
}
+/* Find and update age of fdb entry corresponding to MAC. */
+void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni)
+{
+ u32 hash_index;
+
+ hash_index = fdb_head_index(vxlan, mac, vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ vxlan_find_mac(vxlan, mac, vni);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+}
+EXPORT_SYMBOL(vxlan_fdb_update_mac);
+
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port,
@@ -2744,6 +2775,9 @@ static void vxlan_xmit_one(struct sk_buf
goto out_unlock;
}
+ /* Reset the skb_iif to the tunnel's interface index */
+ skb->skb_iif = dev->ifindex;
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2814,6 +2848,9 @@ static void vxlan_xmit_one(struct sk_buf
if (err < 0)
goto tx_error;
+ /* Reset the skb_iif to the tunnel's interface index */
+ skb->skb_iif = dev->ifindex;
+
udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
&local_ip.sin6.sin6_addr,
&dst->sin6.sin6_addr, tos, ttl,
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -293,6 +293,19 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_COLLECT_METADATA)
+/*
+ * Application data for fdb notifier event
+ */
+struct vxlan_fdb_event {
+ struct net_device *dev;
+ struct vxlan_rdst *rdst;
+ u8 eth_addr[ETH_ALEN];
+};
+
+extern void vxlan_fdb_register_notify(struct notifier_block *nb);
+extern void vxlan_fdb_unregister_notify(struct notifier_block *nb);
+extern void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni);
+
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
@@ -376,6 +389,15 @@ static inline __be32 vxlan_compute_rco(u
return vni_field;
}
+/*
+ * vxlan_get_vni()
+ * Returns the VNI corresponding to the tunnel
+ */
+static inline u32 vxlan_get_vni(struct vxlan_dev *vxlan_tun)
+{
+ return be32_to_cpu(vxlan_tun->cfg.vni);
+}
+
static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
{
return vs->sock->sk->sk_family;