0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2008-2011, Intel Corporation.
0004  *
0005  * Description: Data Center Bridging netlink interface
0006  * Author: Lucy Liu <lucy.liu@intel.com>
0007  */
0008 
0009 #include <linux/netdevice.h>
0010 #include <linux/netlink.h>
0011 #include <linux/slab.h>
0012 #include <net/netlink.h>
0013 #include <net/rtnetlink.h>
0014 #include <linux/dcbnl.h>
0015 #include <net/dcbevent.h>
0016 #include <linux/rtnetlink.h>
0017 #include <linux/init.h>
0018 #include <net/sock.h>
0019 
0020 /* Data Center Bridging (DCB) is a collection of Ethernet enhancements
0021  * intended to allow network traffic with differing requirements
0022  * (highly reliable, no drops vs. best effort vs. low latency) to operate
0023  * and co-exist on Ethernet.  Current DCB features are:
0024  *
0025  * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
0026  *   framework for assigning bandwidth guarantees to traffic classes.
0027  *
0028  * Priority-based Flow Control (PFC) - provides a flow control mechanism which
0029  *   can work independently for each 802.1p priority.
0030  *
0031  * Congestion Notification - provides a mechanism for end-to-end congestion
0032  *   control for protocols which do not have built-in congestion management.
0033  *
0034  * More information about the emerging standards for these Ethernet features
0035  * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
0036  *
0037  * This file implements an rtnetlink interface to allow configuration of DCB
0038  * features for capable devices.
0039  */
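
/* Drivers participate by implementing struct dcbnl_rtnl_ops and pointing
 * netdev->dcbnl_ops at it; the handlers below return an error (usually
 * -EOPNOTSUPP) when the callback they need is missing.  A minimal sketch
 * of how a driver opts in (the foo_* names are purely illustrative):
 *
 *	static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
 *		.getstate	= foo_dcb_getstate,
 *		.setstate	= foo_dcb_setstate,
 *		.ieee_getets	= foo_dcb_ieee_getets,
 *		.ieee_setets	= foo_dcb_ieee_setets,
 *		.getdcbx	= foo_dcb_getdcbx,
 *		.setdcbx	= foo_dcb_setdcbx,
 *	};
 *
 *	netdev->dcbnl_ops = &foo_dcbnl_ops;
 */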
0040 
0041 /**************** DCB attribute policies *************************************/
0042 
0043 /* DCB netlink attributes policy */
0044 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
0045     [DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
0046     [DCB_ATTR_STATE]       = {.type = NLA_U8},
0047     [DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
0048     [DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
0049     [DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
0050     [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
0051     [DCB_ATTR_CAP]         = {.type = NLA_NESTED},
0052     [DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
0053     [DCB_ATTR_BCN]         = {.type = NLA_NESTED},
0054     [DCB_ATTR_APP]         = {.type = NLA_NESTED},
0055     [DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
0056     [DCB_ATTR_DCBX]        = {.type = NLA_U8},
0057     [DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
0058 };
0059 
0060 /* DCB priority flow control to User Priority nested attributes */
0061 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
0062     [DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
0063     [DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
0064     [DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
0065     [DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
0066     [DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
0067     [DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
0068     [DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
0069     [DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
0070     [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
0071 };
0072 
0073 /* DCB priority grouping nested attributes */
0074 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
0075     [DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
0076     [DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
0077     [DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
0078     [DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
0079     [DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
0080     [DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
0081     [DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
0082     [DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
0083     [DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
0084     [DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
0085     [DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
0086     [DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
0087     [DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
0088     [DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
0089     [DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
0090     [DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
0091     [DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
0092     [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
0093 };
0094 
0095 /* DCB traffic class nested attributes. */
0096 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
0097     [DCB_TC_ATTR_PARAM_PGID]            = {.type = NLA_U8},
0098     [DCB_TC_ATTR_PARAM_UP_MAPPING]      = {.type = NLA_U8},
0099     [DCB_TC_ATTR_PARAM_STRICT_PRIO]     = {.type = NLA_U8},
0100     [DCB_TC_ATTR_PARAM_BW_PCT]          = {.type = NLA_U8},
0101     [DCB_TC_ATTR_PARAM_ALL]             = {.type = NLA_FLAG},
0102 };
0103 
0104 /* DCB capabilities nested attributes. */
0105 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
0106     [DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
0107     [DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
0108     [DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
0109     [DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
0110     [DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
0111     [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
0112     [DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
0113     [DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
0114     [DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
0115 };
0116 
0117 /* DCB number of traffic classes nested attributes. */
0118 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
0119     [DCB_NUMTCS_ATTR_ALL]     = {.type = NLA_FLAG},
0120     [DCB_NUMTCS_ATTR_PG]      = {.type = NLA_U8},
0121     [DCB_NUMTCS_ATTR_PFC]     = {.type = NLA_U8},
0122 };
0123 
0124 /* DCB BCN nested attributes. */
0125 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
0126     [DCB_BCN_ATTR_RP_0]         = {.type = NLA_U8},
0127     [DCB_BCN_ATTR_RP_1]         = {.type = NLA_U8},
0128     [DCB_BCN_ATTR_RP_2]         = {.type = NLA_U8},
0129     [DCB_BCN_ATTR_RP_3]         = {.type = NLA_U8},
0130     [DCB_BCN_ATTR_RP_4]         = {.type = NLA_U8},
0131     [DCB_BCN_ATTR_RP_5]         = {.type = NLA_U8},
0132     [DCB_BCN_ATTR_RP_6]         = {.type = NLA_U8},
0133     [DCB_BCN_ATTR_RP_7]         = {.type = NLA_U8},
0134     [DCB_BCN_ATTR_RP_ALL]       = {.type = NLA_FLAG},
0135     [DCB_BCN_ATTR_BCNA_0]       = {.type = NLA_U32},
0136     [DCB_BCN_ATTR_BCNA_1]       = {.type = NLA_U32},
0137     [DCB_BCN_ATTR_ALPHA]        = {.type = NLA_U32},
0138     [DCB_BCN_ATTR_BETA]         = {.type = NLA_U32},
0139     [DCB_BCN_ATTR_GD]           = {.type = NLA_U32},
0140     [DCB_BCN_ATTR_GI]           = {.type = NLA_U32},
0141     [DCB_BCN_ATTR_TMAX]         = {.type = NLA_U32},
0142     [DCB_BCN_ATTR_TD]           = {.type = NLA_U32},
0143     [DCB_BCN_ATTR_RMIN]         = {.type = NLA_U32},
0144     [DCB_BCN_ATTR_W]            = {.type = NLA_U32},
0145     [DCB_BCN_ATTR_RD]           = {.type = NLA_U32},
0146     [DCB_BCN_ATTR_RU]           = {.type = NLA_U32},
0147     [DCB_BCN_ATTR_WRTT]         = {.type = NLA_U32},
0148     [DCB_BCN_ATTR_RI]           = {.type = NLA_U32},
0149     [DCB_BCN_ATTR_C]            = {.type = NLA_U32},
0150     [DCB_BCN_ATTR_ALL]          = {.type = NLA_FLAG},
0151 };
0152 
0153 /* DCB APP nested attributes. */
0154 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
0155     [DCB_APP_ATTR_IDTYPE]       = {.type = NLA_U8},
0156     [DCB_APP_ATTR_ID]           = {.type = NLA_U16},
0157     [DCB_APP_ATTR_PRIORITY]     = {.type = NLA_U8},
0158 };
0159 
0160 /* IEEE 802.1Qaz nested attributes. */
0161 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
0162     [DCB_ATTR_IEEE_ETS]     = {.len = sizeof(struct ieee_ets)},
0163     [DCB_ATTR_IEEE_PFC]     = {.len = sizeof(struct ieee_pfc)},
0164     [DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
0165     [DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
0166     [DCB_ATTR_IEEE_QCN]         = {.len = sizeof(struct ieee_qcn)},
0167     [DCB_ATTR_IEEE_QCN_STATS]   = {.len = sizeof(struct ieee_qcn_stats)},
0168     [DCB_ATTR_DCB_BUFFER]       = {.len = sizeof(struct dcbnl_buffer)},
0169 };
0170 
0171 /* DCB feature configuration nested attributes. */
0172 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
0173     [DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
0174     [DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
0175     [DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
0176     [DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
0177 };
0178 
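/* Application priority table shared by the dcb_getapp()/dcb_setapp() style
 * helpers and the IEEE/CEE fill routines below.  Entries are keyed by
 * ifindex and every access to the list is serialized by dcb_lock.
 */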
0179 static LIST_HEAD(dcb_app_list);
0180 static DEFINE_SPINLOCK(dcb_lock);
0181 
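/* Allocate an skb and start an RTM_GETDCB/RTM_SETDCB message for the given
 * DCB command.  The nlmsghdr is handed back through *nlhp so that callers
 * can finish the message with nlmsg_end() once the payload has been filled.
 */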
0182 static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
0183                     u32 flags, struct nlmsghdr **nlhp)
0184 {
0185     struct sk_buff *skb;
0186     struct dcbmsg *dcb;
0187     struct nlmsghdr *nlh;
0188 
0189     skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
0190     if (!skb)
0191         return NULL;
0192 
0193     nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
0194     BUG_ON(!nlh);
0195 
0196     dcb = nlmsg_data(nlh);
0197     dcb->dcb_family = AF_UNSPEC;
0198     dcb->cmd = cmd;
0199     dcb->dcb_pad = 0;
0200 
0201     if (nlhp)
0202         *nlhp = nlh;
0203 
0204     return skb;
0205 }
0206 
0207 static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
0208               u32 seq, struct nlattr **tb, struct sk_buff *skb)
0209 {
0211     if (!netdev->dcbnl_ops->getstate)
0212         return -EOPNOTSUPP;
0213 
0214     return nla_put_u8(skb, DCB_ATTR_STATE,
0215               netdev->dcbnl_ops->getstate(netdev));
0216 }
0217 
0218 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
0219                u32 seq, struct nlattr **tb, struct sk_buff *skb)
0220 {
0221     struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
0222     u8 value;
0223     int ret;
0224     int i;
0225     int getall = 0;
0226 
0227     if (!tb[DCB_ATTR_PFC_CFG])
0228         return -EINVAL;
0229 
0230     if (!netdev->dcbnl_ops->getpfccfg)
0231         return -EOPNOTSUPP;
0232 
0233     ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
0234                       tb[DCB_ATTR_PFC_CFG],
0235                       dcbnl_pfc_up_nest, NULL);
0236     if (ret)
0237         return ret;
0238 
0239     nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
0240     if (!nest)
0241         return -EMSGSIZE;
0242 
0243     if (data[DCB_PFC_UP_ATTR_ALL])
0244         getall = 1;
0245 
0246     for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
0247         if (!getall && !data[i])
0248             continue;
0249 
0250         netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
0251                                      &value);
0252         ret = nla_put_u8(skb, i, value);
0253         if (ret) {
0254             nla_nest_cancel(skb, nest);
0255             return ret;
0256         }
0257     }
0258     nla_nest_end(skb, nest);
0259 
0260     return 0;
0261 }
0262 
0263 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
0264                 u32 seq, struct nlattr **tb, struct sk_buff *skb)
0265 {
0266     u8 perm_addr[MAX_ADDR_LEN];
0267 
0268     if (!netdev->dcbnl_ops->getpermhwaddr)
0269         return -EOPNOTSUPP;
0270 
0271     memset(perm_addr, 0, sizeof(perm_addr));
0272     netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
0273 
0274     return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
0275 }
0276 
0277 static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
0278             u32 seq, struct nlattr **tb, struct sk_buff *skb)
0279 {
0280     struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
0281     u8 value;
0282     int ret;
0283     int i;
0284     int getall = 0;
0285 
0286     if (!tb[DCB_ATTR_CAP])
0287         return -EINVAL;
0288 
0289     if (!netdev->dcbnl_ops->getcap)
0290         return -EOPNOTSUPP;
0291 
0292     ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX,
0293                       tb[DCB_ATTR_CAP], dcbnl_cap_nest,
0294                       NULL);
0295     if (ret)
0296         return ret;
0297 
0298     nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
0299     if (!nest)
0300         return -EMSGSIZE;
0301 
0302     if (data[DCB_CAP_ATTR_ALL])
0303         getall = 1;
0304 
0305     for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
0306         if (!getall && !data[i])
0307             continue;
0308 
0309         if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
0310             ret = nla_put_u8(skb, i, value);
0311             if (ret) {
0312                 nla_nest_cancel(skb, nest);
0313                 return ret;
0314             }
0315         }
0316     }
0317     nla_nest_end(skb, nest);
0318 
0319     return 0;
0320 }
0321 
0322 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
0323                u32 seq, struct nlattr **tb, struct sk_buff *skb)
0324 {
0325     struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
0326     u8 value;
0327     int ret;
0328     int i;
0329     int getall = 0;
0330 
0331     if (!tb[DCB_ATTR_NUMTCS])
0332         return -EINVAL;
0333 
0334     if (!netdev->dcbnl_ops->getnumtcs)
0335         return -EOPNOTSUPP;
0336 
0337     ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
0338                       tb[DCB_ATTR_NUMTCS],
0339                       dcbnl_numtcs_nest, NULL);
0340     if (ret)
0341         return ret;
0342 
0343     nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
0344     if (!nest)
0345         return -EMSGSIZE;
0346 
0347     if (data[DCB_NUMTCS_ATTR_ALL])
0348         getall = 1;
0349 
0350     for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
0351         if (!getall && !data[i])
0352             continue;
0353 
0354         ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
0355         if (!ret) {
0356             ret = nla_put_u8(skb, i, value);
0357             if (ret) {
0358                 nla_nest_cancel(skb, nest);
0359                 return ret;
0360             }
0361         } else
0362             return -EINVAL;
0363     }
0364     nla_nest_end(skb, nest);
0365 
0366     return 0;
0367 }
0368 
0369 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
0370                u32 seq, struct nlattr **tb, struct sk_buff *skb)
0371 {
0372     struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
0373     int ret;
0374     u8 value;
0375     int i;
0376 
0377     if (!tb[DCB_ATTR_NUMTCS])
0378         return -EINVAL;
0379 
0380     if (!netdev->dcbnl_ops->setnumtcs)
0381         return -EOPNOTSUPP;
0382 
0383     ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
0384                       tb[DCB_ATTR_NUMTCS],
0385                       dcbnl_numtcs_nest, NULL);
0386     if (ret)
0387         return ret;
0388 
0389     for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
0390         if (data[i] == NULL)
0391             continue;
0392 
0393         value = nla_get_u8(data[i]);
0394 
0395         ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
0396         if (ret)
0397             break;
0398     }
0399 
0400     return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
0401 }
0402 
0403 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
0404                  u32 seq, struct nlattr **tb, struct sk_buff *skb)
0405 {
0406     if (!netdev->dcbnl_ops->getpfcstate)
0407         return -EOPNOTSUPP;
0408 
0409     return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
0410               netdev->dcbnl_ops->getpfcstate(netdev));
0411 }
0412 
0413 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
0414                  u32 seq, struct nlattr **tb, struct sk_buff *skb)
0415 {
0416     u8 value;
0417 
0418     if (!tb[DCB_ATTR_PFC_STATE])
0419         return -EINVAL;
0420 
0421     if (!netdev->dcbnl_ops->setpfcstate)
0422         return -EOPNOTSUPP;
0423 
0424     value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
0425 
0426     netdev->dcbnl_ops->setpfcstate(netdev, value);
0427 
0428     return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
0429 }
0430 
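/* CEE GET-APP: report the priority configured for an {idtype, protocol}
 * pair, preferring the driver's getapp() callback and falling back to the
 * shared application table.
 */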
0431 static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
0432             u32 seq, struct nlattr **tb, struct sk_buff *skb)
0433 {
0434     struct nlattr *app_nest;
0435     struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
0436     u16 id;
0437     u8 up, idtype;
0438     int ret;
0439 
0440     if (!tb[DCB_ATTR_APP])
0441         return -EINVAL;
0442 
0443     ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
0444                       tb[DCB_ATTR_APP], dcbnl_app_nest,
0445                       NULL);
0446     if (ret)
0447         return ret;
0448 
0449     /* both IDTYPE and ID must be present */
0450     if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
0451         (!app_tb[DCB_APP_ATTR_ID]))
0452         return -EINVAL;
0453 
0454     /* either by Ethertype or by TCP/UDP port number */
0455     idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
0456     if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
0457         (idtype != DCB_APP_IDTYPE_PORTNUM))
0458         return -EINVAL;
0459 
0460     id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
0461 
0462     if (netdev->dcbnl_ops->getapp) {
0463         ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
0464         if (ret < 0)
0465             return ret;
0466         else
0467             up = ret;
0468     } else {
0469         struct dcb_app app = {
0470                     .selector = idtype,
0471                     .protocol = id,
0472                      };
0473         up = dcb_getapp(netdev, &app);
0474     }
0475 
0476     app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
0477     if (!app_nest)
0478         return -EMSGSIZE;
0479 
0480     ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
0481     if (ret)
0482         goto out_cancel;
0483 
0484     ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
0485     if (ret)
0486         goto out_cancel;
0487 
0488     ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
0489     if (ret)
0490         goto out_cancel;
0491 
0492     nla_nest_end(skb, app_nest);
0493 
0494     return 0;
0495 
0496 out_cancel:
0497     nla_nest_cancel(skb, app_nest);
0498     return ret;
0499 }
0500 
0501 static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
0502             u32 seq, struct nlattr **tb, struct sk_buff *skb)
0503 {
0504     int ret;
0505     u16 id;
0506     u8 up, idtype;
0507     struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
0508 
0509     if (!tb[DCB_ATTR_APP])
0510         return -EINVAL;
0511 
0512     ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
0513                       tb[DCB_ATTR_APP], dcbnl_app_nest,
0514                       NULL);
0515     if (ret)
0516         return ret;
0517 
0518     /* all must be non-null */
0519     if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
0520         (!app_tb[DCB_APP_ATTR_ID]) ||
0521         (!app_tb[DCB_APP_ATTR_PRIORITY]))
0522         return -EINVAL;
0523 
0524     /* either by Ethertype or by TCP/UDP port number */
0525     idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
0526     if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
0527         (idtype != DCB_APP_IDTYPE_PORTNUM))
0528         return -EINVAL;
0529 
0530     id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
0531     up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
0532 
0533     if (netdev->dcbnl_ops->setapp) {
0534         ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
0535         if (ret < 0)
0536             return ret;
0537     } else {
0538         struct dcb_app app;
0539         app.selector = idtype;
0540         app.protocol = id;
0541         app.priority = up;
0542         ret = dcb_setapp(netdev, &app);
0543     }
0544 
0545     ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
0546     dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
0547 
0548     return ret;
0549 }
0550 
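/* Common worker for the CEE priority group GET commands; dir selects the
 * direction queried from the driver (0 = Tx, 1 = Rx).
 */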
0551 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
0552                  struct nlattr **tb, struct sk_buff *skb, int dir)
0553 {
0554     struct nlattr *pg_nest, *param_nest, *data;
0555     struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
0556     struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
0557     u8 prio, pgid, tc_pct, up_map;
0558     int ret;
0559     int getall = 0;
0560     int i;
0561 
0562     if (!tb[DCB_ATTR_PG_CFG])
0563         return -EINVAL;
0564 
0565     if (!netdev->dcbnl_ops->getpgtccfgtx ||
0566         !netdev->dcbnl_ops->getpgtccfgrx ||
0567         !netdev->dcbnl_ops->getpgbwgcfgtx ||
0568         !netdev->dcbnl_ops->getpgbwgcfgrx)
0569         return -EOPNOTSUPP;
0570 
0571     ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
0572                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
0573                       NULL);
0574     if (ret)
0575         return ret;
0576 
0577     pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
0578     if (!pg_nest)
0579         return -EMSGSIZE;
0580 
0581     if (pg_tb[DCB_PG_ATTR_TC_ALL])
0582         getall = 1;
0583 
0584     for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
0585         if (!getall && !pg_tb[i])
0586             continue;
0587 
0588         if (pg_tb[DCB_PG_ATTR_TC_ALL])
0589             data = pg_tb[DCB_PG_ATTR_TC_ALL];
0590         else
0591             data = pg_tb[i];
0592         ret = nla_parse_nested_deprecated(param_tb,
0593                           DCB_TC_ATTR_PARAM_MAX, data,
0594                           dcbnl_tc_param_nest, NULL);
0595         if (ret)
0596             goto err_pg;
0597 
0598         param_nest = nla_nest_start_noflag(skb, i);
0599         if (!param_nest)
0600             goto err_pg;
0601 
0602         pgid = DCB_ATTR_VALUE_UNDEFINED;
0603         prio = DCB_ATTR_VALUE_UNDEFINED;
0604         tc_pct = DCB_ATTR_VALUE_UNDEFINED;
0605         up_map = DCB_ATTR_VALUE_UNDEFINED;
0606 
0607         if (dir) {
0608             /* Rx */
0609             netdev->dcbnl_ops->getpgtccfgrx(netdev,
0610                         i - DCB_PG_ATTR_TC_0, &prio,
0611                         &pgid, &tc_pct, &up_map);
0612         } else {
0613             /* Tx */
0614             netdev->dcbnl_ops->getpgtccfgtx(netdev,
0615                         i - DCB_PG_ATTR_TC_0, &prio,
0616                         &pgid, &tc_pct, &up_map);
0617         }
0618 
0619         if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
0620             param_tb[DCB_TC_ATTR_PARAM_ALL]) {
0621             ret = nla_put_u8(skb,
0622                              DCB_TC_ATTR_PARAM_PGID, pgid);
0623             if (ret)
0624                 goto err_param;
0625         }
0626         if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
0627             param_tb[DCB_TC_ATTR_PARAM_ALL]) {
0628             ret = nla_put_u8(skb,
0629                              DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
0630             if (ret)
0631                 goto err_param;
0632         }
0633         if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
0634             param_tb[DCB_TC_ATTR_PARAM_ALL]) {
0635             ret = nla_put_u8(skb,
0636                              DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
0637             if (ret)
0638                 goto err_param;
0639         }
0640         if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
0641             param_tb[DCB_TC_ATTR_PARAM_ALL]) {
0642             ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
0643                              tc_pct);
0644             if (ret)
0645                 goto err_param;
0646         }
0647         nla_nest_end(skb, param_nest);
0648     }
0649 
0650     if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
0651         getall = 1;
0652     else
0653         getall = 0;
0654 
0655     for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
0656         if (!getall && !pg_tb[i])
0657             continue;
0658 
0659         tc_pct = DCB_ATTR_VALUE_UNDEFINED;
0660 
0661         if (dir) {
0662             /* Rx */
0663             netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
0664                     i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
0665         } else {
0666             /* Tx */
0667             netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
0668                     i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
0669         }
0670         ret = nla_put_u8(skb, i, tc_pct);
0671         if (ret)
0672             goto err_pg;
0673     }
0674 
0675     nla_nest_end(skb, pg_nest);
0676 
0677     return 0;
0678 
0679 err_param:
0680     nla_nest_cancel(skb, param_nest);
0681 err_pg:
0682     nla_nest_cancel(skb, pg_nest);
0683 
0684     return -EMSGSIZE;
0685 }
0686 
0687 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
0688                  u32 seq, struct nlattr **tb, struct sk_buff *skb)
0689 {
0690     return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
0691 }
0692 
0693 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
0694                  u32 seq, struct nlattr **tb, struct sk_buff *skb)
0695 {
0696     return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
0697 }
0698 
0699 static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
0700               u32 seq, struct nlattr **tb, struct sk_buff *skb)
0701 {
0702     u8 value;
0703 
0704     if (!tb[DCB_ATTR_STATE])
0705         return -EINVAL;
0706 
0707     if (!netdev->dcbnl_ops->setstate)
0708         return -EOPNOTSUPP;
0709 
0710     value = nla_get_u8(tb[DCB_ATTR_STATE]);
0711 
0712     return nla_put_u8(skb, DCB_ATTR_STATE,
0713               netdev->dcbnl_ops->setstate(netdev, value));
0714 }
0715 
0716 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
0717                u32 seq, struct nlattr **tb, struct sk_buff *skb)
0718 {
0719     struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
0720     int i;
0721     int ret;
0722     u8 value;
0723 
0724     if (!tb[DCB_ATTR_PFC_CFG])
0725         return -EINVAL;
0726 
0727     if (!netdev->dcbnl_ops->setpfccfg)
0728         return -EOPNOTSUPP;
0729 
0730     ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
0731                       tb[DCB_ATTR_PFC_CFG],
0732                       dcbnl_pfc_up_nest, NULL);
0733     if (ret)
0734         return ret;
0735 
0736     for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
0737         if (data[i] == NULL)
0738             continue;
0739         value = nla_get_u8(data[i]);
0740         netdev->dcbnl_ops->setpfccfg(netdev,
0741             data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
0742     }
0743 
0744     return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
0745 }
0746 
0747 static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
0748             u32 seq, struct nlattr **tb, struct sk_buff *skb)
0749 {
0750     int ret;
0751 
0752     if (!tb[DCB_ATTR_SET_ALL])
0753         return -EINVAL;
0754 
0755     if (!netdev->dcbnl_ops->setall)
0756         return -EOPNOTSUPP;
0757 
0758     ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
0759              netdev->dcbnl_ops->setall(netdev));
0760     dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
0761 
0762     return ret;
0763 }
0764 
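/* Common worker for the CEE priority group SET commands; as above,
 * dir = 0 programs the Tx parameters and dir = 1 the Rx parameters.
 */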
0765 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
0766                  u32 seq, struct nlattr **tb, struct sk_buff *skb,
0767                  int dir)
0768 {
0769     struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
0770     struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
0771     int ret;
0772     int i;
0773     u8 pgid;
0774     u8 up_map;
0775     u8 prio;
0776     u8 tc_pct;
0777 
0778     if (!tb[DCB_ATTR_PG_CFG])
0779         return -EINVAL;
0780 
0781     if (!netdev->dcbnl_ops->setpgtccfgtx ||
0782         !netdev->dcbnl_ops->setpgtccfgrx ||
0783         !netdev->dcbnl_ops->setpgbwgcfgtx ||
0784         !netdev->dcbnl_ops->setpgbwgcfgrx)
0785         return -EOPNOTSUPP;
0786 
0787     ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
0788                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
0789                       NULL);
0790     if (ret)
0791         return ret;
0792 
0793     for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
0794         if (!pg_tb[i])
0795             continue;
0796 
0797         ret = nla_parse_nested_deprecated(param_tb,
0798                           DCB_TC_ATTR_PARAM_MAX,
0799                           pg_tb[i],
0800                           dcbnl_tc_param_nest, NULL);
0801         if (ret)
0802             return ret;
0803 
0804         pgid = DCB_ATTR_VALUE_UNDEFINED;
0805         prio = DCB_ATTR_VALUE_UNDEFINED;
0806         tc_pct = DCB_ATTR_VALUE_UNDEFINED;
0807         up_map = DCB_ATTR_VALUE_UNDEFINED;
0808 
0809         if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
0810             prio =
0811                 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
0812 
0813         if (param_tb[DCB_TC_ATTR_PARAM_PGID])
0814             pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
0815 
0816         if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
0817             tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
0818 
0819         if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
0820             up_map =
0821                  nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
0822 
0823         /* dir: Tx = 0, Rx = 1 */
0824         if (dir) {
0825             /* Rx */
0826             netdev->dcbnl_ops->setpgtccfgrx(netdev,
0827                 i - DCB_PG_ATTR_TC_0,
0828                 prio, pgid, tc_pct, up_map);
0829         } else {
0830             /* Tx */
0831             netdev->dcbnl_ops->setpgtccfgtx(netdev,
0832                 i - DCB_PG_ATTR_TC_0,
0833                 prio, pgid, tc_pct, up_map);
0834         }
0835     }
0836 
0837     for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
0838         if (!pg_tb[i])
0839             continue;
0840 
0841         tc_pct = nla_get_u8(pg_tb[i]);
0842 
0843         /* dir: Tx = 0, Rx = 1 */
0844         if (dir) {
0845             /* Rx */
0846             netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
0847                      i - DCB_PG_ATTR_BW_ID_0, tc_pct);
0848         } else {
0849             /* Tx */
0850             netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
0851                      i - DCB_PG_ATTR_BW_ID_0, tc_pct);
0852         }
0853     }
0854 
0855     return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
0856 }
0857 
0858 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
0859                  u32 seq, struct nlattr **tb, struct sk_buff *skb)
0860 {
0861     return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
0862 }
0863 
0864 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
0865                  u32 seq, struct nlattr **tb, struct sk_buff *skb)
0866 {
0867     return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
0868 }
0869 
0870 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
0871                 u32 seq, struct nlattr **tb, struct sk_buff *skb)
0872 {
0873     struct nlattr *bcn_nest;
0874     struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
0875     u8 value_byte;
0876     u32 value_integer;
0877     int ret;
0878     bool getall = false;
0879     int i;
0880 
0881     if (!tb[DCB_ATTR_BCN])
0882         return -EINVAL;
0883 
0884     if (!netdev->dcbnl_ops->getbcnrp ||
0885         !netdev->dcbnl_ops->getbcncfg)
0886         return -EOPNOTSUPP;
0887 
0888     ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX,
0889                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
0890                       NULL);
0891     if (ret)
0892         return ret;
0893 
0894     bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
0895     if (!bcn_nest)
0896         return -EMSGSIZE;
0897 
0898     if (bcn_tb[DCB_BCN_ATTR_ALL])
0899         getall = true;
0900 
0901     for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
0902         if (!getall && !bcn_tb[i])
0903             continue;
0904 
0905         netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
0906                                     &value_byte);
0907         ret = nla_put_u8(skb, i, value_byte);
0908         if (ret)
0909             goto err_bcn;
0910     }
0911 
0912     for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
0913         if (!getall && !bcn_tb[i])
0914             continue;
0915 
0916         netdev->dcbnl_ops->getbcncfg(netdev, i,
0917                                      &value_integer);
0918         ret = nla_put_u32(skb, i, value_integer);
0919         if (ret)
0920             goto err_bcn;
0921     }
0922 
0923     nla_nest_end(skb, bcn_nest);
0924 
0925     return 0;
0926 
0927 err_bcn:
0928     nla_nest_cancel(skb, bcn_nest);
0929     return ret;
0930 }
0931 
0932 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
0933                 u32 seq, struct nlattr **tb, struct sk_buff *skb)
0934 {
0935     struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
0936     int i;
0937     int ret;
0938     u8 value_byte;
0939     u32 value_int;
0940 
0941     if (!tb[DCB_ATTR_BCN])
0942         return -EINVAL;
0943 
0944     if (!netdev->dcbnl_ops->setbcncfg ||
0945         !netdev->dcbnl_ops->setbcnrp)
0946         return -EOPNOTSUPP;
0947 
0948     ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
0949                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
0950                       NULL);
0951     if (ret)
0952         return ret;
0953 
0954     for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
0955         if (data[i] == NULL)
0956             continue;
0957         value_byte = nla_get_u8(data[i]);
0958         netdev->dcbnl_ops->setbcnrp(netdev,
0959             data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
0960     }
0961 
0962     for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
0963         if (data[i] == NULL)
0964             continue;
0965         value_int = nla_get_u32(data[i]);
0966         netdev->dcbnl_ops->setbcncfg(netdev,
0967                                          i, value_int);
0968     }
0969 
0970     return nla_put_u8(skb, DCB_ATTR_BCN, 0);
0971 }
0972 
0973 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
0974                 int app_nested_type, int app_info_type,
0975                 int app_entry_type)
0976 {
0977     struct dcb_peer_app_info info;
0978     struct dcb_app *table = NULL;
0979     const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
0980     u16 app_count;
0981     int err;
0982 
0983 
0984     /**
0985      * retrieve the peer app configuration from the driver. If the driver
0986      * handlers fail, exit without doing anything
0987      */
0988     err = ops->peer_getappinfo(netdev, &info, &app_count);
0989     if (!err && app_count) {
0990         table = kmalloc_array(app_count, sizeof(struct dcb_app),
0991                       GFP_KERNEL);
0992         if (!table)
0993             return -ENOMEM;
0994 
0995         err = ops->peer_getapptable(netdev, table);
0996     }
0997 
0998     if (!err) {
0999         u16 i;
1000         struct nlattr *app;
1001 
1002         /**
1003          * build the message; from here on the only possible failure
1004          * is due to the skb size
1005          */
1006         err = -EMSGSIZE;
1007 
1008         app = nla_nest_start_noflag(skb, app_nested_type);
1009         if (!app)
1010             goto nla_put_failure;
1011 
1012         if (app_info_type &&
1013             nla_put(skb, app_info_type, sizeof(info), &info))
1014             goto nla_put_failure;
1015 
1016         for (i = 0; i < app_count; i++) {
1017             if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
1018                     &table[i]))
1019                 goto nla_put_failure;
1020         }
1021         nla_nest_end(skb, app);
1022     }
1023     err = 0;
1024 
1025 nla_put_failure:
1026     kfree(table);
1027     return err;
1028 }
1029 
1030 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
1031 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1032 {
1033     struct nlattr *ieee, *app;
1034     struct dcb_app_type *itr;
1035     const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1036     int dcbx;
1037     int err;
1038 
1039     if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1040         return -EMSGSIZE;
1041 
1042     ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
1043     if (!ieee)
1044         return -EMSGSIZE;
1045 
1046     if (ops->ieee_getets) {
1047         struct ieee_ets ets;
1048         memset(&ets, 0, sizeof(ets));
1049         err = ops->ieee_getets(netdev, &ets);
1050         if (!err &&
1051             nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
1052             return -EMSGSIZE;
1053     }
1054 
1055     if (ops->ieee_getmaxrate) {
1056         struct ieee_maxrate maxrate;
1057         memset(&maxrate, 0, sizeof(maxrate));
1058         err = ops->ieee_getmaxrate(netdev, &maxrate);
1059         if (!err) {
1060             err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
1061                       sizeof(maxrate), &maxrate);
1062             if (err)
1063                 return -EMSGSIZE;
1064         }
1065     }
1066 
1067     if (ops->ieee_getqcn) {
1068         struct ieee_qcn qcn;
1069 
1070         memset(&qcn, 0, sizeof(qcn));
1071         err = ops->ieee_getqcn(netdev, &qcn);
1072         if (!err) {
1073             err = nla_put(skb, DCB_ATTR_IEEE_QCN,
1074                       sizeof(qcn), &qcn);
1075             if (err)
1076                 return -EMSGSIZE;
1077         }
1078     }
1079 
1080     if (ops->ieee_getqcnstats) {
1081         struct ieee_qcn_stats qcn_stats;
1082 
1083         memset(&qcn_stats, 0, sizeof(qcn_stats));
1084         err = ops->ieee_getqcnstats(netdev, &qcn_stats);
1085         if (!err) {
1086             err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
1087                       sizeof(qcn_stats), &qcn_stats);
1088             if (err)
1089                 return -EMSGSIZE;
1090         }
1091     }
1092 
1093     if (ops->ieee_getpfc) {
1094         struct ieee_pfc pfc;
1095         memset(&pfc, 0, sizeof(pfc));
1096         err = ops->ieee_getpfc(netdev, &pfc);
1097         if (!err &&
1098             nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
1099             return -EMSGSIZE;
1100     }
1101 
1102     if (ops->dcbnl_getbuffer) {
1103         struct dcbnl_buffer buffer;
1104 
1105         memset(&buffer, 0, sizeof(buffer));
1106         err = ops->dcbnl_getbuffer(netdev, &buffer);
1107         if (!err &&
1108             nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
1109             return -EMSGSIZE;
1110     }
1111 
1112     app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
1113     if (!app)
1114         return -EMSGSIZE;
1115 
1116     spin_lock_bh(&dcb_lock);
1117     list_for_each_entry(itr, &dcb_app_list, list) {
1118         if (itr->ifindex == netdev->ifindex) {
1119             err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
1120                      &itr->app);
1121             if (err) {
1122                 spin_unlock_bh(&dcb_lock);
1123                 return -EMSGSIZE;
1124             }
1125         }
1126     }
1127 
1128     if (netdev->dcbnl_ops->getdcbx)
1129         dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1130     else
1131         dcbx = -EOPNOTSUPP;
1132 
1133     spin_unlock_bh(&dcb_lock);
1134     nla_nest_end(skb, app);
1135 
1136     /* get peer info if available */
1137     if (ops->ieee_peer_getets) {
1138         struct ieee_ets ets;
1139         memset(&ets, 0, sizeof(ets));
1140         err = ops->ieee_peer_getets(netdev, &ets);
1141         if (!err &&
1142             nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
1143             return -EMSGSIZE;
1144     }
1145 
1146     if (ops->ieee_peer_getpfc) {
1147         struct ieee_pfc pfc;
1148         memset(&pfc, 0, sizeof(pfc));
1149         err = ops->ieee_peer_getpfc(netdev, &pfc);
1150         if (!err &&
1151             nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
1152             return -EMSGSIZE;
1153     }
1154 
1155     if (ops->peer_getappinfo && ops->peer_getapptable) {
1156         err = dcbnl_build_peer_app(netdev, skb,
1157                        DCB_ATTR_IEEE_PEER_APP,
1158                        DCB_ATTR_IEEE_APP_UNSPEC,
1159                        DCB_ATTR_IEEE_APP);
1160         if (err)
1161             return -EMSGSIZE;
1162     }
1163 
1164     nla_nest_end(skb, ieee);
1165     if (dcbx >= 0) {
1166         err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1167         if (err)
1168             return -EMSGSIZE;
1169     }
1170 
1171     return 0;
1172 }
1173 
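/* Fill one priority group block of a CEE dump.  Note that the direction
 * convention here is the opposite of __dcbnl_pg_getcfg(): dir != 0 selects
 * the Tx attributes (DCB_ATTR_CEE_TX_PG) and the *tx driver callbacks.
 */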
1174 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1175                  int dir)
1176 {
1177     u8 pgid, up_map, prio, tc_pct;
1178     const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1179     int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
1180     struct nlattr *pg = nla_nest_start_noflag(skb, i);
1181 
1182     if (!pg)
1183         return -EMSGSIZE;
1184 
1185     for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1186         struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);
1187 
1188         if (!tc_nest)
1189             return -EMSGSIZE;
1190 
1191         pgid = DCB_ATTR_VALUE_UNDEFINED;
1192         prio = DCB_ATTR_VALUE_UNDEFINED;
1193         tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1194         up_map = DCB_ATTR_VALUE_UNDEFINED;
1195 
1196         if (!dir)
1197             ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
1198                       &prio, &pgid, &tc_pct, &up_map);
1199         else
1200             ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1201                       &prio, &pgid, &tc_pct, &up_map);
1202 
1203         if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
1204             nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1205             nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1206             nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1207             return -EMSGSIZE;
1208         nla_nest_end(skb, tc_nest);
1209     }
1210 
1211     for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1212         tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1213 
1214         if (!dir)
1215             ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
1216                        &tc_pct);
1217         else
1218             ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1219                        &tc_pct);
1220         if (nla_put_u8(skb, i, tc_pct))
1221             return -EMSGSIZE;
1222     }
1223     nla_nest_end(skb, pg);
1224     return 0;
1225 }
1226 
1227 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1228 {
1229     struct nlattr *cee, *app;
1230     struct dcb_app_type *itr;
1231     const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1232     int dcbx, i, err = -EMSGSIZE;
1233     u8 value;
1234 
1235     if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1236         goto nla_put_failure;
1237     cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
1238     if (!cee)
1239         goto nla_put_failure;
1240 
1241     /* local pg */
1242     if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
1243         err = dcbnl_cee_pg_fill(skb, netdev, 1);
1244         if (err)
1245             goto nla_put_failure;
1246     }
1247 
1248     if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
1249         err = dcbnl_cee_pg_fill(skb, netdev, 0);
1250         if (err)
1251             goto nla_put_failure;
1252     }
1253 
1254     /* local pfc */
1255     if (ops->getpfccfg) {
1256         struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
1257                                 DCB_ATTR_CEE_PFC);
1258 
1259         if (!pfc_nest)
1260             goto nla_put_failure;
1261 
1262         for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1263             ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1264             if (nla_put_u8(skb, i, value))
1265                 goto nla_put_failure;
1266         }
1267         nla_nest_end(skb, pfc_nest);
1268     }
1269 
1270     /* local app */
1271     spin_lock_bh(&dcb_lock);
1272     app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
1273     if (!app)
1274         goto dcb_unlock;
1275 
1276     list_for_each_entry(itr, &dcb_app_list, list) {
1277         if (itr->ifindex == netdev->ifindex) {
1278             struct nlattr *app_nest = nla_nest_start_noflag(skb,
1279                                     DCB_ATTR_APP);
1280             if (!app_nest)
1281                 goto dcb_unlock;
1282 
1283             err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
1284                      itr->app.selector);
1285             if (err)
1286                 goto dcb_unlock;
1287 
1288             err = nla_put_u16(skb, DCB_APP_ATTR_ID,
1289                       itr->app.protocol);
1290             if (err)
1291                 goto dcb_unlock;
1292 
1293             err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
1294                      itr->app.priority);
1295             if (err)
1296                 goto dcb_unlock;
1297 
1298             nla_nest_end(skb, app_nest);
1299         }
1300     }
1301     nla_nest_end(skb, app);
1302 
1303     if (netdev->dcbnl_ops->getdcbx)
1304         dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1305     else
1306         dcbx = -EOPNOTSUPP;
1307 
1308     spin_unlock_bh(&dcb_lock);
1309 
1310     /* features flags */
1311     if (ops->getfeatcfg) {
1312         struct nlattr *feat = nla_nest_start_noflag(skb,
1313                                 DCB_ATTR_CEE_FEAT);
1314         if (!feat)
1315             goto nla_put_failure;
1316 
1317         for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
1318              i++)
1319             if (!ops->getfeatcfg(netdev, i, &value) &&
1320                 nla_put_u8(skb, i, value))
1321                 goto nla_put_failure;
1322 
1323         nla_nest_end(skb, feat);
1324     }
1325 
1326     /* peer info if available */
1327     if (ops->cee_peer_getpg) {
1328         struct cee_pg pg;
1329         memset(&pg, 0, sizeof(pg));
1330         err = ops->cee_peer_getpg(netdev, &pg);
1331         if (!err &&
1332             nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
1333             goto nla_put_failure;
1334     }
1335 
1336     if (ops->cee_peer_getpfc) {
1337         struct cee_pfc pfc;
1338         memset(&pfc, 0, sizeof(pfc));
1339         err = ops->cee_peer_getpfc(netdev, &pfc);
1340         if (!err &&
1341             nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
1342             goto nla_put_failure;
1343     }
1344 
1345     if (ops->peer_getappinfo && ops->peer_getapptable) {
1346         err = dcbnl_build_peer_app(netdev, skb,
1347                        DCB_ATTR_CEE_PEER_APP_TABLE,
1348                        DCB_ATTR_CEE_PEER_APP_INFO,
1349                        DCB_ATTR_CEE_PEER_APP);
1350         if (err)
1351             goto nla_put_failure;
1352     }
1353     nla_nest_end(skb, cee);
1354 
1355     /* DCBX state */
1356     if (dcbx >= 0) {
1357         err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1358         if (err)
1359             goto nla_put_failure;
1360     }
1361     return 0;
1362 
1363 dcb_unlock:
1364     spin_unlock_bh(&dcb_lock);
1365 nla_put_failure:
1366     err = -EMSGSIZE;
1367     return err;
1368 }
1369 
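/* Build an IEEE or CEE dump for dev and multicast it to RTNLGRP_DCB
 * listeners; dcbnl_ieee_notify() and dcbnl_cee_notify() below are thin
 * wrappers that select the DCBX flavour.
 */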
1370 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1371             u32 seq, u32 portid, int dcbx_ver)
1372 {
1373     struct net *net = dev_net(dev);
1374     struct sk_buff *skb;
1375     struct nlmsghdr *nlh;
1376     const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1377     int err;
1378 
1379     if (!ops)
1380         return -EOPNOTSUPP;
1381 
1382     skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
1383     if (!skb)
1384         return -ENOMEM;
1385 
1386     if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1387         err = dcbnl_ieee_fill(skb, dev);
1388     else
1389         err = dcbnl_cee_fill(skb, dev);
1390 
1391     if (err < 0) {
1392         /* Report error to broadcast listeners */
1393         nlmsg_free(skb);
1394         rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1395     } else {
1396         /* End nlmsg and notify broadcast listeners */
1397         nlmsg_end(skb, nlh);
1398         rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1399     }
1400 
1401     return err;
1402 }
1403 
1404 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1405               u32 seq, u32 portid)
1406 {
1407     return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
1408 }
1409 EXPORT_SYMBOL(dcbnl_ieee_notify);
1410 
1411 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1412              u32 seq, u32 portid)
1413 {
1414     return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
1415 }
1416 EXPORT_SYMBOL(dcbnl_cee_notify);
1417 
1418 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
1419  * If any requested operation cannot be completed,
1420  * the entire message is aborted and an error value is returned.
1421  * No attempt is made to reconcile the case where only part of the
1422  * command can be completed.
1423  */
1424 static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1425               u32 seq, struct nlattr **tb, struct sk_buff *skb)
1426 {
1427     const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1428     struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1429     int prio;
1430     int err;
1431 
1432     if (!ops)
1433         return -EOPNOTSUPP;
1434 
1435     if (!tb[DCB_ATTR_IEEE])
1436         return -EINVAL;
1437 
1438     err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
1439                       tb[DCB_ATTR_IEEE],
1440                       dcbnl_ieee_policy, NULL);
1441     if (err)
1442         return err;
1443 
1444     if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1445         struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1446         err = ops->ieee_setets(netdev, ets);
1447         if (err)
1448             goto err;
1449     }
1450 
1451     if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
1452         struct ieee_maxrate *maxrate =
1453             nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
1454         err = ops->ieee_setmaxrate(netdev, maxrate);
1455         if (err)
1456             goto err;
1457     }
1458 
1459     if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
1460         struct ieee_qcn *qcn =
1461             nla_data(ieee[DCB_ATTR_IEEE_QCN]);
1462 
1463         err = ops->ieee_setqcn(netdev, qcn);
1464         if (err)
1465             goto err;
1466     }
1467 
1468     if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1469         struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1470         err = ops->ieee_setpfc(netdev, pfc);
1471         if (err)
1472             goto err;
1473     }
1474 
1475     if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
1476         struct dcbnl_buffer *buffer =
1477             nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
1478 
1479         for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
1480             if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
1481                 err = -EINVAL;
1482                 goto err;
1483             }
1484         }
1485 
1486         err = ops->dcbnl_setbuffer(netdev, buffer);
1487         if (err)
1488             goto err;
1489     }
1490 
1491     if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1492         struct nlattr *attr;
1493         int rem;
1494 
1495         nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1496             struct dcb_app *app_data;
1497 
1498             if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1499                 continue;
1500 
1501             if (nla_len(attr) < sizeof(struct dcb_app)) {
1502                 err = -ERANGE;
1503                 goto err;
1504             }
1505 
1506             app_data = nla_data(attr);
1507             if (ops->ieee_setapp)
1508                 err = ops->ieee_setapp(netdev, app_data);
1509             else
1510                 err = dcb_ieee_setapp(netdev, app_data);
1511             if (err)
1512                 goto err;
1513         }
1514     }
1515 
1516 err:
1517     err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1518     dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
1519     return err;
1520 }
1521 
1522 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1523               u32 seq, struct nlattr **tb, struct sk_buff *skb)
1524 {
1525     const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1526 
1527     if (!ops)
1528         return -EOPNOTSUPP;
1529 
1530     return dcbnl_ieee_fill(skb, netdev);
1531 }
1532 
1533 static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
1534               u32 seq, struct nlattr **tb, struct sk_buff *skb)
1535 {
1536     const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1537     struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1538     int err;
1539 
1540     if (!ops)
1541         return -EOPNOTSUPP;
1542 
1543     if (!tb[DCB_ATTR_IEEE])
1544         return -EINVAL;
1545 
1546     err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
1547                       tb[DCB_ATTR_IEEE],
1548                       dcbnl_ieee_policy, NULL);
1549     if (err)
1550         return err;
1551 
1552     if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1553         struct nlattr *attr;
1554         int rem;
1555 
1556         nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1557             struct dcb_app *app_data;
1558 
1559             if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1560                 continue;
1561             app_data = nla_data(attr);
1562             if (ops->ieee_delapp)
1563                 err = ops->ieee_delapp(netdev, app_data);
1564             else
1565                 err = dcb_ieee_delapp(netdev, app_data);
1566             if (err)
1567                 goto err;
1568         }
1569     }
1570 
1571 err:
1572     err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1573     dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1574     return err;
1575 }
1576 
1577 
1578 /* DCBX configuration */
1579 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1580              u32 seq, struct nlattr **tb, struct sk_buff *skb)
1581 {
1582     if (!netdev->dcbnl_ops->getdcbx)
1583         return -EOPNOTSUPP;
1584 
1585     return nla_put_u8(skb, DCB_ATTR_DCBX,
1586               netdev->dcbnl_ops->getdcbx(netdev));
1587 }
1588 
1589 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1590              u32 seq, struct nlattr **tb, struct sk_buff *skb)
1591 {
1592     u8 value;
1593 
1594     if (!netdev->dcbnl_ops->setdcbx)
1595         return -EOPNOTSUPP;
1596 
1597     if (!tb[DCB_ATTR_DCBX])
1598         return -EINVAL;
1599 
1600     value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1601 
1602     return nla_put_u8(skb, DCB_ATTR_DCBX,
1603               netdev->dcbnl_ops->setdcbx(netdev, value));
1604 }
1605 
1606 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1607                 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1608 {
1609     struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1610     u8 value;
1611     int ret, i;
1612     int getall = 0;
1613 
1614     if (!netdev->dcbnl_ops->getfeatcfg)
1615         return -EOPNOTSUPP;
1616 
1617     if (!tb[DCB_ATTR_FEATCFG])
1618         return -EINVAL;
1619 
1620     ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
1621                       tb[DCB_ATTR_FEATCFG],
1622                       dcbnl_featcfg_nest, NULL);
1623     if (ret)
1624         return ret;
1625 
1626     nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
1627     if (!nest)
1628         return -EMSGSIZE;
1629 
1630     if (data[DCB_FEATCFG_ATTR_ALL])
1631         getall = 1;
1632 
1633     for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1634         if (!getall && !data[i])
1635             continue;
1636 
1637         ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1638         if (!ret)
1639             ret = nla_put_u8(skb, i, value);
1640 
1641         if (ret) {
1642             nla_nest_cancel(skb, nest);
1643             goto nla_put_failure;
1644         }
1645     }
1646     nla_nest_end(skb, nest);
1647 
1648 nla_put_failure:
1649     return ret;
1650 }
1651 
1652 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1653                 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1654 {
1655     struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1656     int ret, i;
1657     u8 value;
1658 
1659     if (!netdev->dcbnl_ops->setfeatcfg)
1660         return -EOPNOTSUPP;
1661 
1662     if (!tb[DCB_ATTR_FEATCFG])
1663         return -EINVAL;
1664 
1665     ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
1666                       tb[DCB_ATTR_FEATCFG],
1667                       dcbnl_featcfg_nest, NULL);
1668 
1669     if (ret)
1670         goto err;
1671 
1672     for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1673         if (data[i] == NULL)
1674             continue;
1675 
1676         value = nla_get_u8(data[i]);
1677 
1678         ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1679 
1680         if (ret)
1681             goto err;
1682     }
1683 err:
1684     ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1685 
1686     return ret;
1687 }
1688 
1689 /* Handle CEE DCBX GET commands. */
1690 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1691              u32 seq, struct nlattr **tb, struct sk_buff *skb)
1692 {
1693     const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1694 
1695     if (!ops)
1696         return -EOPNOTSUPP;
1697 
1698     return dcbnl_cee_fill(skb, netdev);
1699 }
1700 
1701 struct reply_func {
1702     /* reply netlink message type */
1703     int type;
1704 
1705     /* function to fill message contents */
1706     int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
1707             struct nlattr **, struct sk_buff *);
1708 };
1709 
1710 static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
1711     [DCB_CMD_GSTATE]    = { RTM_GETDCB, dcbnl_getstate },
1712     [DCB_CMD_SSTATE]    = { RTM_SETDCB, dcbnl_setstate },
1713     [DCB_CMD_PFC_GCFG]  = { RTM_GETDCB, dcbnl_getpfccfg },
1714     [DCB_CMD_PFC_SCFG]  = { RTM_SETDCB, dcbnl_setpfccfg },
1715     [DCB_CMD_GPERM_HWADDR]  = { RTM_GETDCB, dcbnl_getperm_hwaddr },
1716     [DCB_CMD_GCAP]      = { RTM_GETDCB, dcbnl_getcap },
1717     [DCB_CMD_GNUMTCS]   = { RTM_GETDCB, dcbnl_getnumtcs },
1718     [DCB_CMD_SNUMTCS]   = { RTM_SETDCB, dcbnl_setnumtcs },
1719     [DCB_CMD_PFC_GSTATE]    = { RTM_GETDCB, dcbnl_getpfcstate },
1720     [DCB_CMD_PFC_SSTATE]    = { RTM_SETDCB, dcbnl_setpfcstate },
1721     [DCB_CMD_GAPP]      = { RTM_GETDCB, dcbnl_getapp },
1722     [DCB_CMD_SAPP]      = { RTM_SETDCB, dcbnl_setapp },
1723     [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
1724     [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
1725     [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
1726     [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
1727     [DCB_CMD_SET_ALL]   = { RTM_SETDCB, dcbnl_setall },
1728     [DCB_CMD_BCN_GCFG]  = { RTM_GETDCB, dcbnl_bcn_getcfg },
1729     [DCB_CMD_BCN_SCFG]  = { RTM_SETDCB, dcbnl_bcn_setcfg },
1730     [DCB_CMD_IEEE_GET]  = { RTM_GETDCB, dcbnl_ieee_get },
1731     [DCB_CMD_IEEE_SET]  = { RTM_SETDCB, dcbnl_ieee_set },
1732     [DCB_CMD_IEEE_DEL]  = { RTM_SETDCB, dcbnl_ieee_del },
1733     [DCB_CMD_GDCBX]     = { RTM_GETDCB, dcbnl_getdcbx },
1734     [DCB_CMD_SDCBX]     = { RTM_SETDCB, dcbnl_setdcbx },
1735     [DCB_CMD_GFEATCFG]  = { RTM_GETDCB, dcbnl_getfeatcfg },
1736     [DCB_CMD_SFEATCFG]  = { RTM_SETDCB, dcbnl_setfeatcfg },
1737     [DCB_CMD_CEE_GET]   = { RTM_GETDCB, dcbnl_cee_get },
1738 };
1739 
1740 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1741             struct netlink_ext_ack *extack)
1742 {
1743     struct net *net = sock_net(skb->sk);
1744     struct net_device *netdev;
1745     struct dcbmsg *dcb = nlmsg_data(nlh);
1746     struct nlattr *tb[DCB_ATTR_MAX + 1];
1747     u32 portid = NETLINK_CB(skb).portid;
1748     int ret = -EINVAL;
1749     struct sk_buff *reply_skb;
1750     struct nlmsghdr *reply_nlh = NULL;
1751     const struct reply_func *fn;
1752 
1753     if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
1754         return -EPERM;
1755 
1756     ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1757                      dcbnl_rtnl_policy, extack);
1758     if (ret < 0)
1759         return ret;
1760 
1761     if (dcb->cmd > DCB_CMD_MAX)
1762         return -EINVAL;
1763 
1764     /* check if a reply function has been defined for the command */
1765     fn = &reply_funcs[dcb->cmd];
1766     if (!fn->cb)
1767         return -EOPNOTSUPP;
1768     if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
1769         return -EPERM;
1770 
1771     if (!tb[DCB_ATTR_IFNAME])
1772         return -EINVAL;
1773 
1774     netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
1775     if (!netdev)
1776         return -ENODEV;
1777 
1778     if (!netdev->dcbnl_ops)
1779         return -EOPNOTSUPP;
1780 
1781     reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
1782                  nlh->nlmsg_flags, &reply_nlh);
1783     if (!reply_skb)
1784         return -ENOMEM;
1785 
1786     ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
1787     if (ret < 0) {
1788         nlmsg_free(reply_skb);
1789         goto out;
1790     }
1791 
1792     nlmsg_end(reply_skb, reply_nlh);
1793 
1794     ret = rtnl_unicast(reply_skb, net, portid);
1795 out:
1796     return ret;
1797 }
1798 
1799 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1800                        int ifindex, int prio)
1801 {
1802     struct dcb_app_type *itr;
1803 
1804     list_for_each_entry(itr, &dcb_app_list, list) {
1805         if (itr->app.selector == app->selector &&
1806             itr->app.protocol == app->protocol &&
1807             itr->ifindex == ifindex &&
1808             ((prio == -1) || itr->app.priority == prio))
1809             return itr;
1810     }
1811 
1812     return NULL;
1813 }
1814 
1815 static int dcb_app_add(const struct dcb_app *app, int ifindex)
1816 {
1817     struct dcb_app_type *entry;
1818 
1819     entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1820     if (!entry)
1821         return -ENOMEM;
1822 
1823     memcpy(&entry->app, app, sizeof(*app));
1824     entry->ifindex = ifindex;
1825     list_add(&entry->list, &dcb_app_list);
1826 
1827     return 0;
1828 }
1829 
1830 /**
1831  * dcb_getapp - retrieve the DCBX application user priority
1832  * @dev: network interface
1833  * @app: application to get user priority of
1834  *
1835  * On success returns a non-zero 802.1p user priority bitmap;
1836  * otherwise returns 0, the invalid user priority bitmap, to
1837  * indicate an error.
1838  */
1839 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1840 {
1841     struct dcb_app_type *itr;
1842     u8 prio = 0;
1843 
1844     spin_lock_bh(&dcb_lock);
1845     itr = dcb_app_lookup(app, dev->ifindex, -1);
1846     if (itr)
1847         prio = itr->app.priority;
1848     spin_unlock_bh(&dcb_lock);
1849 
1850     return prio;
1851 }
1852 EXPORT_SYMBOL(dcb_getapp);
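
/* Illustrative sketch, not part of the original dcbnl.c: one way a CEE-aware
 * caller might read back the user priority bitmap stored for an ethertype.
 * The helper name and the choice of ETH_P_FCOE are assumptions made only for
 * this example.
 */
static u8 __maybe_unused dcbnl_example_get_fcoe_up(struct net_device *netdev)
{
    struct dcb_app app = {
        .selector = DCB_APP_IDTYPE_ETHTYPE,
        .protocol = ETH_P_FCOE,
    };

    /* 0 means no APP entry is stored for this protocol. */
    return dcb_getapp(netdev, &app);
}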
1853 
1854 /**
1855  * dcb_setapp - add CEE dcb application data to app list
1856  * @dev: network interface
1857  * @new: application data to add
1858  *
1859  * Priority 0 is an invalid priority in the CEE spec. This routine
1860  * removes applications from the app list if the priority is
1861  * set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap.
1862  */
1863 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1864 {
1865     struct dcb_app_type *itr;
1866     struct dcb_app_type event;
1867     int err = 0;
1868 
1869     event.ifindex = dev->ifindex;
1870     memcpy(&event.app, new, sizeof(event.app));
1871     if (dev->dcbnl_ops->getdcbx)
1872         event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1873 
1874     spin_lock_bh(&dcb_lock);
1875     /* Search for existing match and replace */
1876     itr = dcb_app_lookup(new, dev->ifindex, -1);
1877     if (itr) {
1878         if (new->priority) {
1879             itr->app.priority = new->priority;
1880         } else {
1881             list_del(&itr->list);
1882             kfree(itr);
1883         }
1884         goto out;
1885     }
1886     /* App type does not exist, add new application type */
1887     if (new->priority)
1888         err = dcb_app_add(new, dev->ifindex);
1889 out:
1890     spin_unlock_bh(&dcb_lock);
1891     if (!err)
1892         call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1893     return err;
1894 }
1895 EXPORT_SYMBOL(dcb_setapp);
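
/* Illustrative sketch, not part of the original dcbnl.c: setting (or, by
 * passing 0, clearing) a CEE APP mapping for an ethertype, relying on the
 * priority-0-deletes rule documented above.  The helper name and ETH_P_FCOE
 * are example assumptions.
 */
static int __maybe_unused dcbnl_example_set_fcoe_up(struct net_device *netdev, u8 up_map)
{
    struct dcb_app app = {
        .selector = DCB_APP_IDTYPE_ETHTYPE,
        .protocol = ETH_P_FCOE,
        .priority = up_map,    /* 0 removes any existing entry */
    };

    return dcb_setapp(netdev, &app);
}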
1896 
1897 /**
1898  * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
1899  * @dev: network interface
1900  * @app: application to retrieve the priority mask for
1901  *
1902  * Helper routine which on success returns a non-zero 802.1Qaz user
1903  * priority bitmap; otherwise returns 0 to indicate the dcb_app was
1904  * not found in the APP list.
1905  */
1906 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
1907 {
1908     struct dcb_app_type *itr;
1909     u8 prio = 0;
1910 
1911     spin_lock_bh(&dcb_lock);
1912     itr = dcb_app_lookup(app, dev->ifindex, -1);
1913     if (itr)
1914         prio |= 1 << itr->app.priority;
1915     spin_unlock_bh(&dcb_lock);
1916 
1917     return prio;
1918 }
1919 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
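
/* Illustrative sketch, not part of the original dcbnl.c: resolving an IEEE
 * ethertype APP entry to its priority bitmap.  ETH_P_FIP and the helper name
 * are assumptions chosen only for the example.
 */
static u8 __maybe_unused dcbnl_example_fip_prio_mask(struct net_device *netdev)
{
    struct dcb_app app = {
        .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
        .protocol = ETH_P_FIP,
    };

    /* Bit N set means an APP entry assigns priority N; 0 means no match. */
    return dcb_ieee_getapp_mask(netdev, &app);
}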
1920 
1921 /**
1922  * dcb_ieee_setapp - add IEEE dcb application data to app list
1923  * @dev: network interface
1924  * @new: application data to add
1925  *
1926  * This adds application data to the list. Multiple application
1927  * entries may exist for the same selector and protocol as long
1928  * as the priorities are different. Priority is expected to be a
1929  * 3-bit unsigned integer.
1930  */
1931 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1932 {
1933     struct dcb_app_type event;
1934     int err = 0;
1935 
1936     event.ifindex = dev->ifindex;
1937     memcpy(&event.app, new, sizeof(event.app));
1938     if (dev->dcbnl_ops->getdcbx)
1939         event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1940 
1941     spin_lock_bh(&dcb_lock);
1942     /* Search for existing match and abort if found */
1943     if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
1944         err = -EEXIST;
1945         goto out;
1946     }
1947 
1948     err = dcb_app_add(new, dev->ifindex);
1949 out:
1950     spin_unlock_bh(&dcb_lock);
1951     if (!err)
1952         call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1953     return err;
1954 }
1955 EXPORT_SYMBOL(dcb_ieee_setapp);
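
/* Illustrative sketch, not part of the original dcbnl.c: installing an IEEE
 * APP entry that maps a DSCP codepoint to a priority.  DSCP 46 (EF) and
 * priority 5 are arbitrary example values.
 */
static int __maybe_unused dcbnl_example_map_dscp46(struct net_device *netdev)
{
    struct dcb_app app = {
        .selector = IEEE_8021QAZ_APP_SEL_DSCP,
        .protocol = 46,    /* DSCP codepoint */
        .priority = 5,     /* 3-bit priority */
    };

    /* Returns -EEXIST if this exact {selector, protocol, priority}
     * tuple is already on the list.
     */
    return dcb_ieee_setapp(netdev, &app);
}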
1956 
1957 /**
1958  * dcb_ieee_delapp - delete IEEE dcb application data from list
1959  * @dev: network interface
1960  * @del: application data to delete
1961  *
1962  * This removes a matching APP entry from the APP list.
1963  */
1964 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1965 {
1966     struct dcb_app_type *itr;
1967     struct dcb_app_type event;
1968     int err = -ENOENT;
1969 
1970     event.ifindex = dev->ifindex;
1971     memcpy(&event.app, del, sizeof(event.app));
1972     if (dev->dcbnl_ops->getdcbx)
1973         event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1974 
1975     spin_lock_bh(&dcb_lock);
1976     /* Search for existing match and remove it. */
1977     if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
1978         list_del(&itr->list);
1979         kfree(itr);
1980         err = 0;
1981     }
1982 
1983     spin_unlock_bh(&dcb_lock);
1984     if (!err)
1985         call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1986     return err;
1987 }
1988 EXPORT_SYMBOL(dcb_ieee_delapp);
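
/* Illustrative sketch, not part of the original dcbnl.c: removing the entry
 * installed in the previous example.  The whole {selector, protocol,
 * priority} tuple must match for the lookup to succeed.
 */
static int __maybe_unused dcbnl_example_unmap_dscp46(struct net_device *netdev)
{
    struct dcb_app app = {
        .selector = IEEE_8021QAZ_APP_SEL_DSCP,
        .protocol = 46,
        .priority = 5,
    };

    /* Returns -ENOENT when no matching entry is on the list. */
    return dcb_ieee_delapp(netdev, &app);
}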
1989 
1990 /*
1991  * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
1992  * priorities to the DSCP values assigned to that priority. Initialize p_map
1993  * such that each map element holds a bit mask of DSCP values configured for
1994  * that priority by APP entries.
1995  */
1996 void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
1997                     struct dcb_ieee_app_prio_map *p_map)
1998 {
1999     int ifindex = dev->ifindex;
2000     struct dcb_app_type *itr;
2001     u8 prio;
2002 
2003     memset(p_map->map, 0, sizeof(p_map->map));
2004 
2005     spin_lock_bh(&dcb_lock);
2006     list_for_each_entry(itr, &dcb_app_list, list) {
2007         if (itr->ifindex == ifindex &&
2008             itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2009             itr->app.protocol < 64 &&
2010             itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
2011             prio = itr->app.priority;
2012             p_map->map[prio] |= 1ULL << itr->app.protocol;
2013         }
2014     }
2015     spin_unlock_bh(&dcb_lock);
2016 }
2017 EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
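
/* Illustrative sketch, not part of the original dcbnl.c: walking the
 * priority-to-DSCP map, e.g. from a driver that mirrors APP entries into
 * hardware QoS tables.  The helper name is an assumption.
 */
static void __maybe_unused dcbnl_example_walk_prio_dscp(struct net_device *netdev)
{
    struct dcb_ieee_app_prio_map prio_map;
    int prio, dscp;

    dcb_ieee_getapp_prio_dscp_mask_map(netdev, &prio_map);

    for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
        for (dscp = 0; dscp < 64; dscp++)
            if (prio_map.map[prio] & (1ULL << dscp))
                netdev_dbg(netdev, "prio %d <- dscp %d\n", prio, dscp);
}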
2018 
2019 /*
2020  * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
2021  * DSCP values to the priorities assigned to that DSCP value. Initialize p_map
2022  * such that each map element holds a bit mask of priorities configured for a
2023  * given DSCP value by APP entries.
2024  */
2025 void
2026 dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
2027                    struct dcb_ieee_app_dscp_map *p_map)
2028 {
2029     int ifindex = dev->ifindex;
2030     struct dcb_app_type *itr;
2031 
2032     memset(p_map->map, 0, sizeof(p_map->map));
2033 
2034     spin_lock_bh(&dcb_lock);
2035     list_for_each_entry(itr, &dcb_app_list, list) {
2036         if (itr->ifindex == ifindex &&
2037             itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2038             itr->app.protocol < 64 &&
2039             itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2040             p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
2041     }
2042     spin_unlock_bh(&dcb_lock);
2043 }
2044 EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
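
/* Illustrative sketch, not part of the original dcbnl.c: the reverse lookup,
 * resolving a single DSCP value to the bitmap of priorities configured for
 * it.  The helper name is an assumption.
 */
static u8 __maybe_unused dcbnl_example_dscp_to_prio_mask(struct net_device *netdev, u8 dscp)
{
    struct dcb_ieee_app_dscp_map dscp_map;

    if (dscp >= 64)
        return 0;

    dcb_ieee_getapp_dscp_prio_mask_map(netdev, &dscp_map);

    /* Bit N set means some APP entry maps this DSCP value to priority N. */
    return dscp_map.map[dscp];
}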
2045 
2046 /*
2047  * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
2048  * type, with valid PID values >= 1536. A special meaning is then assigned to
2049  * protocol value of 0: "default priority. For use when priority is not
2050  * otherwise specified".
2051  *
2052  * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries
2053  * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default
2054  * priorities set by these entries.
2055  */
2056 u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
2057 {
2058     int ifindex = dev->ifindex;
2059     struct dcb_app_type *itr;
2060     u8 mask = 0;
2061 
2062     spin_lock_bh(&dcb_lock);
2063     list_for_each_entry(itr, &dcb_app_list, list) {
2064         if (itr->ifindex == ifindex &&
2065             itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
2066             itr->app.protocol == 0 &&
2067             itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2068             mask |= 1 << itr->app.priority;
2069     }
2070     spin_unlock_bh(&dcb_lock);
2071 
2072     return mask;
2073 }
2074 EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
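
/* Illustrative sketch, not part of the original dcbnl.c: deriving a single
 * port-default priority from the mask of {prio, ETHERTYPE, 0} entries.
 * Picking the highest set bit is an example policy, not something this file
 * prescribes.
 */
static int __maybe_unused dcbnl_example_default_prio(struct net_device *netdev)
{
    u8 mask = dcb_ieee_getapp_default_prio_mask(netdev);

    if (!mask)
        return -ENOENT;    /* no default-priority APP entry */

    return fls(mask) - 1;    /* highest configured default priority */
}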
2075 
2076 static void dcbnl_flush_dev(struct net_device *dev)
2077 {
2078     struct dcb_app_type *itr, *tmp;
2079 
2080     spin_lock_bh(&dcb_lock);
2081 
2082     list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
2083         if (itr->ifindex == dev->ifindex) {
2084             list_del(&itr->list);
2085             kfree(itr);
2086         }
2087     }
2088 
2089     spin_unlock_bh(&dcb_lock);
2090 }
2091 
2092 static int dcbnl_netdevice_event(struct notifier_block *nb,
2093                  unsigned long event, void *ptr)
2094 {
2095     struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2096 
2097     switch (event) {
2098     case NETDEV_UNREGISTER:
2099         if (!dev->dcbnl_ops)
2100             return NOTIFY_DONE;
2101 
2102         dcbnl_flush_dev(dev);
2103 
2104         return NOTIFY_OK;
2105     default:
2106         return NOTIFY_DONE;
2107     }
2108 }
2109 
2110 static struct notifier_block dcbnl_nb __read_mostly = {
2111     .notifier_call  = dcbnl_netdevice_event,
2112 };
2113 
2114 static int __init dcbnl_init(void)
2115 {
2116     int err;
2117 
2118     err = register_netdevice_notifier(&dcbnl_nb);
2119     if (err)
2120         return err;
2121 
2122     rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
2123     rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
2124 
2125     return 0;
2126 }
2127 device_initcall(dcbnl_init);