/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

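/* Program a scheduling class into the firmware.  SCHED_FW_OP_ADD and
 * SCHED_FW_OP_DEL both resolve to the same t4_sched_params() call; any
 * other op is rejected with -ENOTSUPP.
 */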
static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
	case SCHED_FW_OP_DEL:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize,
				      p->u.params.burstsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

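/* Perform the firmware side of a bind/unbind: point a TX queue's EQ
 * context at a scheduling class via a FW_PARAMS command, or send a
 * FLOWC work request for an ETHOFLD connection.  Unbinding writes
 * FW_SCHED_CLS_NONE as the class.
 */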
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		pf = adap->pf;
		vf = 0;

		err = t4_set_params(adap, adap->mbox, pf, vf, 1,
				    &fw_param, &fw_class);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		fe = (struct sched_flowc_entry *)arg;

		fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
		err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
					       fe->param.tid, fw_class);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

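/* Walk all active classes of the matching bind type and return the
 * bound entry whose identifier equals @val (queue context id for
 * SCHED_QUEUE, connection tid for SCHED_FLOWC), or NULL if none.
 */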
static void *t4_sched_entry_lookup(struct port_info *pi,
				   enum sched_bind_type type,
				   const u32 val)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	void *found = NULL;

	/* Look for an entry with matching @val */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		if (e->state == SCHED_STATE_UNUSED ||
		    e->bind_type != type)
			continue;

		switch (type) {
		case SCHED_QUEUE: {
			struct sched_queue_entry *qe;

			list_for_each_entry(qe, &e->entry_list, list) {
				if (qe->cntxt_id == val) {
					found = qe;
					break;
				}
			}
			break;
		}
		case SCHED_FLOWC: {
			struct sched_flowc_entry *fe;

			list_for_each_entry(fe, &e->entry_list, list) {
				if (fe->param.tid == val) {
					found = fe;
					break;
				}
			}
			break;
		}
		default:
			return NULL;
		}

		if (found)
			break;
	}

	return found;
}

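/**
 * cxgb4_sched_queue_lookup - find the class a TX queue is bound to
 * @dev: net_device pointer
 * @p: scheduling queue entry naming the port-relative TX queue
 *
 * Returns the scheduling class the queue is currently bound to, or
 * NULL if @p->queue is out of range or the queue is unbound.
 */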
struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
					     struct ch_sched_queue *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return NULL;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
}

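/* Unbind a TX queue from its current class, if any.  The class's
 * refcount is dropped, and the class itself is freed back to the
 * hardware when the last entity leaves it.
 */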
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];

	/* Find the existing entry that the queue is bound to */
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	if (qe) {
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[qe->param.class];
		list_del(&qe->list);
		kvfree(qe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

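/* Bind a TX queue to the class in @p->class.  The queue is first
 * unbound from any class it currently belongs to.
 */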
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind queue to specified class */
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err)
		goto out_err;

	list_add_tail(&qe->list, &e->entry_list);
	e->bind_type = SCHED_QUEUE;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(qe);
	return err;
}

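/* Unbind an ETHOFLD flow (@p->tid) from its current class, if any,
 * freeing the class back to the hardware when its refcount hits zero.
 */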
static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	/* Find the existing entry that the flowc is bound to */
	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
	if (fe) {
		err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[fe->param.class];
		list_del(&fe->list);
		kvfree(fe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

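/* Bind an ETHOFLD flow to the class in @p->class, unbinding it first
 * from any class it currently belongs to.
 */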
static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
	if (!fe)
		return -ENOMEM;

	/* Unbind flowc from any existing class */
	err = t4_sched_flowc_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind flowc to specified class */
	memcpy(&fe->param, p, sizeof(fe->param));

	e = &s->tab[fe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
	if (err)
		goto out_err;

	list_add_tail(&fe->list, &e->entry_list);
	e->bind_type = SCHED_FLOWC;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(fe);
	return err;
}

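/* Unbind every entity still attached to scheduling class @e */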
static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		list_for_each_entry(qe, &e->entry_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		list_for_each_entry(fe, &e->entry_list, list)
			t4_sched_flowc_unbind(pi, &fe->param);
		break;
	}
	default:
		break;
	}
}

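/* Validate @arg and dispatch the bind/unbind request to the handler
 * for the given entity type.
 */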
static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		if (bind)
			err = t4_sched_flowc_bind(pi, fe);
		else
			err = t4_sched_flowc_unbind(pi, fe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

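/* Illustrative use of the bind API (a sketch, not taken from a caller
 * in this file; the field values are assumptions for demonstration):
 *
 *	struct ch_sched_queue qe = {
 *		.queue = 0,	// port-relative TX queue index
 *		.class = 2,	// e.g. idx returned by cxgb4_sched_class_alloc()
 *	};
 *	err = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 */
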
/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or FLOWC)
 *
 * Binds an entity (TX queue or ETHOFLD flow) to a scheduling class.
 * If the entity is already bound to another class, it is first unbound
 * from that class and then bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	return t4_sched_class_bind_unbind_op(pi, arg, type, true);
}

/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or FLOWC)
 *
 * Unbinds an entity (TX queue or ETHOFLD flow) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *found = NULL;
	struct sched_class *e, *end;

	if (!p) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

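/* Return a class backing @p's parameters.  Flow mode requests may
 * share an existing class with identical parameters; otherwise an
 * unused slot is claimed and programmed via a firmware ADD command.
 * @p must request SCHED_CLS_NONE, i.e. callers never pick the slot.
 */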
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_class *e = NULL;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	/* See if there's an existing class with same requested sched
	 * params. Classes can only be shared among FLOWC types. For
	 * other types, always request a new class.
	 */
	if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
		e = t4_sched_class_lookup(pi, p);

	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			return NULL;

		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err)
			return NULL;
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
	}

	return e;
}

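/* Illustrative allocation of a class-level rate limiter (a sketch; the
 * exact field choices are assumptions for demonstration, not taken
 * from a caller in this file):
 *
 *	struct ch_sched_params p = { };
 *
 *	p.type = SCHED_CLASS_TYPE_PACKET;
 *	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
 *	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
 *	p.u.params.class = SCHED_CLS_NONE;	// let the driver pick a slot
 *	p.u.params.maxrate = 100000;		// Kbps
 *	e = cxgb4_sched_class_alloc(dev, &p);
 */
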
/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create; must not be NULL and must have
 *     @p->u.params.class set to SCHED_CLS_NONE
 *
 * Returns a pointer to the scheduling class created.  For flow mode
 * requests, an existing class with matching scheduling parameters is
 * reused if one is found; otherwise an unused class is claimed and
 * programmed with the requested parameters.  Returns NULL on failure.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}

/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id to free
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s = pi->sched_tbl;
	struct ch_sched_params p;
	struct sched_class *e;
	u32 speed;
	int ret;

	e = &s->tab[classid];
	if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
		/* Port based rate limiting needs explicit reset back
		 * to max rate. But, we'll do explicit reset for all
		 * types, instead of just port based type, to be on
		 * the safer side.
		 */
		memcpy(&p, &e->info, sizeof(p));
		/* Always reset mode to 0. Otherwise, FLOWC mode will
		 * still be enabled even after resetting the traffic
		 * class.
		 */
		p.u.params.mode = 0;
		p.u.params.minrate = 0;
		p.u.params.pktsize = 0;

		ret = t4_get_link_params(pi, NULL, &speed, NULL);
		if (!ret)
			p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
		else
			p.u.params.maxrate = SCHED_MAX_RATE_KBPS;

		t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);

		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}
}

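/* Detach all bound entities from class @e, then release the class */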
static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
	struct port_info *pi = netdev2pinfo(dev);

	t4_sched_class_unbind_all(pi, e, e->bind_type);
	cxgb4_sched_class_free(dev, e->idx);
}

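/* Allocate and initialize a scheduling table with @sched_size class
 * slots, each starting out unused with an empty entity list.
 */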
struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
	if (!s)
		return NULL;

	s->sched_size = sched_size;

	for (i = 0; i < s->sched_size; i++) {
		memset(&s->tab[i], 0, sizeof(struct sched_class));
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].entry_list);
		atomic_set(&s->tab[i].refcnt, 0);
	}
	return s;
}

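/* Free every port's scheduling table, releasing any still-active
 * classes (and their bound entities) along the way.
 */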
void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int j, i;

	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(adap->port[j], e);
		}
		kvfree(s);
	}
}