// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "selq.h"
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include "en.h"
#include "en/ptp.h"
#include "en/htb.h"

/* Snapshot of every parameter that queue selection needs. A snapshot is
 * published to the datapath as a single RCU-protected pointer
 * (selq->active), so ndo_select_queue always sees one consistent set of
 * values. See mlx5e_selq_apply() and mlx5e_select_queue().
 */
struct mlx5e_selq_params {
    unsigned int num_regular_queues; /* num_channels * num_tcs, see mlx5e_selq_prepare_params() */
    unsigned int num_channels;
    unsigned int num_tcs;
    union {
        u8 is_special_queues; /* non-zero iff any flag below is set; fast-path test */
        struct {
            bool is_htb : 1; /* HTB offload root is active */
            bool is_ptp : 1; /* PTP TX port-timestamping queues exist */
        };
    };
    u16 htb_maj_id;  /* TC major handle of the HTB root; 0 when HTB is off */
    u16 htb_defcls;  /* default HTB classid for unclassified packets */
};
0026 
0027 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
0028 {
0029     struct mlx5e_selq_params *init_params;
0030 
0031     selq->state_lock = state_lock;
0032 
0033     selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
0034     if (!selq->standby)
0035         return -ENOMEM;
0036 
0037     init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
0038     if (!init_params) {
0039         kvfree(selq->standby);
0040         selq->standby = NULL;
0041         return -ENOMEM;
0042     }
0043     /* Assign dummy values, so that mlx5e_select_queue won't crash. */
0044     *init_params = (struct mlx5e_selq_params) {
0045         .num_regular_queues = 1,
0046         .num_channels = 1,
0047         .num_tcs = 1,
0048         .is_htb = false,
0049         .is_ptp = false,
0050         .htb_maj_id = 0,
0051         .htb_defcls = 0,
0052     };
0053     rcu_assign_pointer(selq->active, init_params);
0054 
0055     return 0;
0056 }
0057 
0058 void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
0059 {
0060     WARN_ON_ONCE(selq->is_prepared);
0061 
0062     kvfree(selq->standby);
0063     selq->standby = NULL;
0064     selq->is_prepared = true;
0065 
0066     mlx5e_selq_apply(selq);
0067 
0068     kvfree(selq->standby);
0069     selq->standby = NULL;
0070 }
0071 
0072 void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
0073 {
0074     struct mlx5e_selq_params *selq_active;
0075 
0076     lockdep_assert_held(selq->state_lock);
0077     WARN_ON_ONCE(selq->is_prepared);
0078 
0079     selq->is_prepared = true;
0080 
0081     selq_active = rcu_dereference_protected(selq->active,
0082                         lockdep_is_held(selq->state_lock));
0083     *selq->standby = *selq_active;
0084     selq->standby->num_channels = params->num_channels;
0085     selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
0086     selq->standby->num_regular_queues =
0087         selq->standby->num_channels * selq->standby->num_tcs;
0088     selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
0089 }
0090 
0091 bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
0092 {
0093     struct mlx5e_selq_params *selq_active =
0094         rcu_dereference_protected(selq->active, lockdep_is_held(selq->state_lock));
0095 
0096     return selq_active->htb_maj_id;
0097 }
0098 
0099 void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
0100 {
0101     struct mlx5e_selq_params *selq_active;
0102 
0103     lockdep_assert_held(selq->state_lock);
0104     WARN_ON_ONCE(selq->is_prepared);
0105 
0106     selq->is_prepared = true;
0107 
0108     selq_active = rcu_dereference_protected(selq->active,
0109                         lockdep_is_held(selq->state_lock));
0110     *selq->standby = *selq_active;
0111     selq->standby->is_htb = htb_maj_id;
0112     selq->standby->htb_maj_id = htb_maj_id;
0113     selq->standby->htb_defcls = htb_defcls;
0114 }
0115 
/* Atomically publish the prepared standby parameters as the active set.
 *
 * After rcu_replace_pointer(), new ndo_select_queue calls see the new
 * parameters; synchronize_net() then waits out readers still using the old
 * set before that old set is recycled as the next standby buffer. The
 * statement order here is load-bearing — do not reorder.
 */
void mlx5e_selq_apply(struct mlx5e_selq *selq)
{
    struct mlx5e_selq_params *old_params;

    WARN_ON_ONCE(!selq->is_prepared);

    selq->is_prepared = false;

    old_params = rcu_replace_pointer(selq->active, selq->standby,
                     lockdep_is_held(selq->state_lock));
    synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
    selq->standby = old_params;
}
0129 
/* Discard a prepared-but-not-applied configuration. The staged values stay
 * in selq->standby and are simply overwritten by the next prepare call.
 */
void mlx5e_selq_cancel(struct mlx5e_selq *selq)
{
    lockdep_assert_held(selq->state_lock);
    WARN_ON_ONCE(!selq->is_prepared);

    selq->is_prepared = false;
}
0137 
#ifdef CONFIG_MLX5_CORE_EN_DCB
/* Map a packet's DSCP field to a user priority via the dscp2prio table. */
static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
    if (skb->protocol == htons(ETH_P_IP))
        return priv->dcbx_dp.dscp2prio[ipv4_get_dsfield(ip_hdr(skb)) >> 2];
    if (skb->protocol == htons(ETH_P_IPV6))
        return priv->dcbx_dp.dscp2prio[ipv6_get_dsfield(ipv6_hdr(skb)) >> 2];

    /* Non-IP traffic falls back to table entry 0. */
    return priv->dcbx_dp.dscp2prio[0];
}
#endif
0151 
/* Determine the packet's user priority: DSCP-based when the device trusts
 * DSCP (DCB builds only), otherwise the VLAN PCP bits, defaulting to 0.
 */
static int mlx5e_get_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
#ifdef CONFIG_MLX5_CORE_EN_DCB
    if (READ_ONCE(priv->dcbx_dp.trust_state) == MLX5_QPTS_TRUST_DSCP)
        return mlx5e_get_dscp_up(priv, skb);
#endif
    return skb_vlan_tag_present(skb) ? skb_vlan_tag_get_prio(skb) : 0;
}
0162 
0163 static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
0164                   struct mlx5e_selq_params *selq)
0165 {
0166     struct mlx5e_priv *priv = netdev_priv(dev);
0167     int up;
0168 
0169     up = selq->num_tcs > 1 ? mlx5e_get_up(priv, skb) : 0;
0170 
0171     return selq->num_regular_queues + up;
0172 }
0173 
0174 static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
0175                   struct mlx5e_selq_params *selq)
0176 {
0177     u16 classid;
0178 
0179     /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
0180     if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
0181         classid = TC_H_MIN(skb->priority);
0182     else
0183         classid = selq->htb_defcls;
0184 
0185     if (!classid)
0186         return 0;
0187 
0188     return mlx5e_htb_get_txq_by_classid(priv->htb, classid);
0189 }
0190 
/* ndo_select_queue callback.
 *
 * Works on an RCU snapshot of the selq parameters (published by
 * mlx5e_selq_apply), so every decision in one call sees one consistent
 * configuration. Regular txqs occupy [0, num_regular_queues), laid out as
 * num_tcs groups of num_channels; PTP txqs start at num_regular_queues
 * (see mlx5e_select_ptpsq).
 */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
               struct net_device *sb_dev)
{
    struct mlx5e_priv *priv = netdev_priv(dev);
    struct mlx5e_selq_params *selq;
    int txq_ix, up;

    selq = rcu_dereference_bh(priv->selq.active);

    /* This is a workaround needed only for the mlx5e_netdev_change_profile
     * flow that zeroes out the whole priv without unregistering the netdev
     * and without preventing ndo_select_queue from being called.
     */
    if (unlikely(!selq))
        return 0;

    if (likely(!selq->is_special_queues)) {
        /* No special queues, netdev_pick_tx returns one of the regular ones. */

        txq_ix = netdev_pick_tx(dev, skb, NULL);

        if (selq->num_tcs <= 1)
            return txq_ix;

        up = mlx5e_get_up(priv, skb);

        /* Normalize any picked txq_ix to [0, num_channels),
         * So we can return a txq_ix that matches the channel and
         * packet UP.
         */
        return mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels) +
            up * selq->num_channels;
    }

    if (unlikely(selq->htb_maj_id)) {
        /* HTB root is active. num_tcs == 1 in this mode, so the PTP SQ
         * (if enabled) sits right at index num_channels.
         */

        txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
        if (txq_ix > 0)
            return txq_ix;

        if (unlikely(selq->is_ptp && mlx5e_use_ptpsq(skb)))
            return selq->num_channels;

        txq_ix = netdev_pick_tx(dev, skb, NULL);

        /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
         * If they are selected, switch to regular queues.
         * Driver to select these queues only at mlx5e_select_ptpsq()
         * and mlx5e_select_htb_queue().
         */
        return mlx5e_txq_to_ch_ix_htb(txq_ix, selq->num_channels);
    }

    /* PTP is enabled (is_special_queues set, but no HTB root). */

    if (mlx5e_use_ptpsq(skb))
        return mlx5e_select_ptpsq(dev, skb, selq);

    txq_ix = netdev_pick_tx(dev, skb, NULL);

    /* Normalize any picked txq_ix to [0, num_channels). Queues in range
     * [0, num_regular_queues) will be mapped to the corresponding channel
     * index, so that we can apply the packet's UP (if num_tcs > 1).
     * If netdev_pick_tx() picks ptp_channel, switch to a regular queue,
     * because driver should select the PTP only at mlx5e_select_ptpsq().
     */
    txq_ix = mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels);

    if (selq->num_tcs <= 1)
        return txq_ix;

    up = mlx5e_get_up(priv, skb);

    return txq_ix + up * selq->num_channels;
}