// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include "main.h"

/* LAG group config flags. */
#define NFP_FL_LAG_LAST         BIT(1)
#define NFP_FL_LAG_FIRST        BIT(2)
#define NFP_FL_LAG_DATA         BIT(3)
#define NFP_FL_LAG_XON          BIT(4)
#define NFP_FL_LAG_SYNC         BIT(5)
#define NFP_FL_LAG_SWITCH       BIT(6)
#define NFP_FL_LAG_RESET        BIT(7)

/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP        BIT(0)
#define NFP_PORT_LAG_TX_ENABLED     BIT(1)
#define NFP_PORT_LAG_CHANGED        BIT(2)

enum nfp_fl_lag_batch {
    NFP_FL_LAG_BATCH_FIRST,
    NFP_FL_LAG_BATCH_MEMBER,
    NFP_FL_LAG_BATCH_FINISHED
};

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 * @ctrl_flags: Configuration flags
 * @reserved:   Reserved for future use
 * @ttl:    Time to live of packet - host always sets to 0xff
 * @pkt_number: Config message packet number - increment for each message
 * @batch_ver:  Batch version of messages - increment for each batch of messages
 * @group_id:   Group ID applicable
 * @group_inst: Group instance number - increment when group is reused
 * @members:    Array of 32-bit words listing all active group members
 */
struct nfp_flower_cmsg_lag_config {
    u8 ctrl_flags;
    u8 reserved[2];
    u8 ttl;
    __be32 pkt_number;
    __be32 batch_ver;
    __be32 group_id;
    __be32 group_inst;
    __be32 members[];
};

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 * @group_id:       Assigned group ID for host/kernel sync
 * @group_inst:     Group instance in case of ID reuse
 * @list:       List entry
 * @master_ndev:    Group master Netdev
0054  * @dirty:      Marked if the group needs synced to HW
0055  * @offloaded:      Marked if the group is currently offloaded to NIC
0056  * @to_remove:      Marked if the group should be removed from NIC
0057  * @to_destroy:     Marked if the group should be removed from driver
0058  * @slave_cnt:      Number of slaves in group
0059  */
0060 struct nfp_fl_lag_group {
0061     unsigned int group_id;
0062     u8 group_inst;
0063     struct list_head list;
0064     struct net_device *master_ndev;
0065     bool dirty;
0066     bool offloaded;
0067     bool to_remove;
0068     bool to_destroy;
0069     unsigned int slave_cnt;
0070 };
0071 
0072 #define NFP_FL_LAG_PKT_NUMBER_MASK  GENMASK(30, 0)
0073 #define NFP_FL_LAG_VERSION_MASK     GENMASK(22, 0)
0074 #define NFP_FL_LAG_HOST_TTL     0xff
0075 
0076 /* Use this ID with zero members to ack a batch config */
0077 #define NFP_FL_LAG_SYNC_ID      0
0078 #define NFP_FL_LAG_GROUP_MIN        1 /* ID 0 reserved */
0079 #define NFP_FL_LAG_GROUP_MAX        32 /* IDs 1 to 31 are valid */
0080 
0081 /* wait for more config */
0082 #define NFP_FL_LAG_DELAY        (msecs_to_jiffies(2))
0083 
0084 #define NFP_FL_LAG_RETRANS_LIMIT    100 /* max retrans cmsgs to store */
0085 
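/* Return the next config message packet number, wrapping within the
 * 31-bit range understood by the firmware.
 */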
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
    lag->pkt_num++;
    lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

    return lag->pkt_num;
}

static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
    /* LSB is not considered by firmware so add 2 for each increment. */
    lag->batch_ver += 2;
    lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

    /* Zero is reserved by firmware. */
    if (!lag->batch_ver)
        lag->batch_ver += 2;
}

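/* Allocate a group ID from the IDA, create a new group entry for @master
 * and add it to the LAG group list, marked dirty so the next sync
 * offloads it. Called with the LAG lock held.
 */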
static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
    struct nfp_fl_lag_group *group;
    struct nfp_flower_priv *priv;
    int id;

    priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

    id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
                NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
    if (id < 0) {
        nfp_flower_cmsg_warn(priv->app,
                     "No more bonding groups available\n");
        return ERR_PTR(id);
    }

    group = kmalloc(sizeof(*group), GFP_KERNEL);
    if (!group) {
        ida_simple_remove(&lag->ida_handle, id);
        return ERR_PTR(-ENOMEM);
    }

    group->group_id = id;
    group->master_ndev = master;
    group->dirty = true;
    group->offloaded = false;
    group->to_remove = false;
    group->to_destroy = false;
    group->slave_cnt = 0;
    group->group_inst = ++lag->global_inst;
    list_add_tail(&group->list, &lag->group_list);

    return group;
}

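/* Find the group entry whose bond master is @master, or return NULL if
 * the master is not tracked. Called with the LAG lock held.
 */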
static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
                      struct net_device *master)
{
    struct nfp_fl_lag_group *entry;

    if (!master)
        return NULL;

    list_for_each_entry(entry, &lag->group_list, list)
        if (entry->master_ndev == master)
            return entry;

    return NULL;
}

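/* Fill a pre-LAG action with the group ID, group instance and current
 * batch version for the bond @master, so the firmware can check the
 * action against its LAG group config.
 */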
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
                       struct net_device *master,
                       struct nfp_fl_pre_lag *pre_act,
                       struct netlink_ext_ack *extack)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_fl_lag_group *group = NULL;
    __be32 temp_vers;

    mutex_lock(&priv->nfp_lag.lock);
    group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
                              master);
    if (!group) {
        mutex_unlock(&priv->nfp_lag.lock);
        NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
        return -ENOENT;
    }

    pre_act->group_id = cpu_to_be16(group->group_id);
    temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
                NFP_FL_PRE_LAG_VER_OFF);
    memcpy(pre_act->lag_version, &temp_vers, 3);
    pre_act->instance = group->group_inst;
    mutex_unlock(&priv->nfp_lag.lock);

    return 0;
}

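/* Return the offload group ID for the bond @master, or -ENOENT if the
 * bond is not currently tracked.
 */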
int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_fl_lag_group *group = NULL;
    int group_id = -ENOENT;

    mutex_lock(&priv->nfp_lag.lock);
    group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
                              master);
    if (group)
        group_id = group->group_id;
    mutex_unlock(&priv->nfp_lag.lock);

    return group_id;
}

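/* Build and send one LAG config control message for @group listing its
 * active members, managing the first/reset/last batch flags as it goes.
 * An active member count of zero deletes the group on the firmware side;
 * a finished batch is acked using the reserved SYNC group ID.
 */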
static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
            struct net_device **active_members,
            unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
    struct nfp_flower_cmsg_lag_config *cmsg_payload;
    struct nfp_flower_priv *priv;
    unsigned long int flags;
    unsigned int size, i;
    struct sk_buff *skb;

    priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
    size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
    skb = nfp_flower_cmsg_alloc(priv->app, size,
                    NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
                    GFP_KERNEL);
    if (!skb)
        return -ENOMEM;

    cmsg_payload = nfp_flower_cmsg_get_data(skb);
    flags = 0;

    /* Increment batch version for each new batch of config messages. */
    if (*batch == NFP_FL_LAG_BATCH_FIRST) {
        flags |= NFP_FL_LAG_FIRST;
        nfp_fl_increment_version(lag);
        *batch = NFP_FL_LAG_BATCH_MEMBER;
    }

    /* If it is a reset msg then it is also the end of the batch. */
    if (lag->rst_cfg) {
        flags |= NFP_FL_LAG_RESET;
        *batch = NFP_FL_LAG_BATCH_FINISHED;
    }

    /* To signal the end of a batch, both the switch and last flags are set
     * and the reserved SYNC group ID is used.
     */
    if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
        flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
        lag->rst_cfg = false;
        cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
        cmsg_payload->group_inst = 0;
    } else {
        cmsg_payload->group_id = cpu_to_be32(group->group_id);
        cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
    }

    cmsg_payload->reserved[0] = 0;
    cmsg_payload->reserved[1] = 0;
    cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
    cmsg_payload->ctrl_flags = flags;
    cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
    cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));

    for (i = 0; i < member_cnt; i++)
        cmsg_payload->members[i] =
            cpu_to_be32(nfp_repr_get_port_id(active_members[i]));

    nfp_ctrl_tx(priv->app->ctrl, skb);
    return 0;
}

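/* Delayed work handler that synchronizes the LAG group list with the
 * firmware as a single config batch. Groups marked to_remove are deleted
 * on hardware (and freed if also marked to_destroy); dirty groups have
 * their active members re-sent. Groups that are not dirty, or whose slave
 * set no longer matches the last notification, are skipped until pending
 * notifications have been processed.
 */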
static void nfp_fl_lag_do_work(struct work_struct *work)
{
    enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
    struct nfp_fl_lag_group *entry, *storage;
    struct delayed_work *delayed_work;
    struct nfp_flower_priv *priv;
    struct nfp_fl_lag *lag;
    int err;

    delayed_work = to_delayed_work(work);
    lag = container_of(delayed_work, struct nfp_fl_lag, work);
    priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

    mutex_lock(&lag->lock);
    list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
        struct net_device *iter_netdev, **acti_netdevs;
        struct nfp_flower_repr_priv *repr_priv;
        int active_count = 0, slaves = 0;
        struct nfp_repr *repr;
        unsigned long *flags;

        if (entry->to_remove) {
            /* Active count of 0 deletes group on hw. */
            err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
                              &batch);
            if (!err) {
                entry->to_remove = false;
                entry->offloaded = false;
            } else {
                nfp_flower_cmsg_warn(priv->app,
                             "group delete failed\n");
                schedule_delayed_work(&lag->work,
                              NFP_FL_LAG_DELAY);
                continue;
            }

            if (entry->to_destroy) {
                ida_simple_remove(&lag->ida_handle,
                          entry->group_id);
                list_del(&entry->list);
                kfree(entry);
            }
            continue;
        }

        acti_netdevs = kmalloc_array(entry->slave_cnt,
                         sizeof(*acti_netdevs), GFP_KERNEL);
        if (!acti_netdevs) {
            /* Retry the sync later if the allocation fails. */
            schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
            continue;
        }

        /* Include sanity check in the loop. It may be that a bond has
         * changed between processing the last notification and the
         * work queue triggering. If the number of slaves has changed
         * or it now contains netdevs that cannot be offloaded, ignore
         * the group until pending notifications are processed.
         */
        rcu_read_lock();
        for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
            if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
                slaves = 0;
                break;
            }

            repr = netdev_priv(iter_netdev);

            if (repr->app != priv->app) {
                slaves = 0;
                break;
            }

            slaves++;
            if (slaves > entry->slave_cnt)
                break;

            /* Check the ports for state changes. */
            repr_priv = repr->app_priv;
            flags = &repr_priv->lag_port_flags;

            if (*flags & NFP_PORT_LAG_CHANGED) {
                *flags &= ~NFP_PORT_LAG_CHANGED;
                entry->dirty = true;
            }

            if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
                (*flags & NFP_PORT_LAG_LINK_UP))
                acti_netdevs[active_count++] = iter_netdev;
        }
        rcu_read_unlock();

        if (slaves != entry->slave_cnt || !entry->dirty) {
            kfree(acti_netdevs);
            continue;
        }

        err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
                          active_count, &batch);
        if (!err) {
            entry->offloaded = true;
            entry->dirty = false;
        } else {
            nfp_flower_cmsg_warn(priv->app,
                         "group offload failed\n");
            schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
        }

        kfree(acti_netdevs);
    }

    /* End the config batch if at least one packet has been batched. */
    if (batch == NFP_FL_LAG_BATCH_MEMBER) {
        batch = NFP_FL_LAG_BATCH_FINISHED;
        err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
        if (err)
            nfp_flower_cmsg_warn(priv->app,
                         "group batch end cmsg failed\n");
    }

    mutex_unlock(&lag->lock);
}

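/* Queue a firmware LAG config message for later retransmission. Reject it
 * if its group ID is out of range, and drop it if the retransmit queue
 * has reached its storage limit.
 */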
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
    struct nfp_flower_cmsg_lag_config *cmsg_payload;

    cmsg_payload = nfp_flower_cmsg_get_data(skb);
    if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
        return -EINVAL;

    /* Drop cmsg retrans if storage limit is exceeded to prevent
     * overloading. If the fw notices that expected messages have not been
     * received in a given time block, it will request a full resync.
     */
    if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
        return -ENOSPC;

    __skb_queue_tail(&lag->retrans_skbs, skb);

    return 0;
}

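/* Flush the retransmit queue, sending each stored config message back to
 * the firmware over the control channel.
 */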
static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
    struct nfp_flower_priv *priv;
    struct sk_buff *skb;

    priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

    while ((skb = __skb_dequeue(&lag->retrans_skbs)))
        nfp_ctrl_tx(priv->app->ctrl, skb);
}

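/* Process a LAG config message received from the firmware: store DATA
 * messages for retransmission, flush the stored messages on XON, and
 * trigger a full group resync on SYNC. Returns true if the skb has been
 * stored on the retransmit queue.
 */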
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
    struct nfp_flower_cmsg_lag_config *cmsg_payload;
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_fl_lag_group *group_entry;
    unsigned long int flags;
    bool store_skb = false;
    int err;

    cmsg_payload = nfp_flower_cmsg_get_data(skb);
    flags = cmsg_payload->ctrl_flags;

    /* Note the intentional fall through below. If DATA and XON are both
     * set, the message will be stored and sent again with the rest of the
     * unprocessed messages list.
     */

    /* Store */
    if (flags & NFP_FL_LAG_DATA)
        if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
            store_skb = true;

    /* Send stored */
    if (flags & NFP_FL_LAG_XON)
        nfp_fl_send_unprocessed(&priv->nfp_lag);

    /* Resend all */
    if (flags & NFP_FL_LAG_SYNC) {
        /* To resend all config:
         * 1) Clear all unprocessed messages
         * 2) Mark all groups dirty
         * 3) Reset NFP group config
         * 4) Schedule a LAG config update
         */

        __skb_queue_purge(&priv->nfp_lag.retrans_skbs);

        mutex_lock(&priv->nfp_lag.lock);
        list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
                    list)
            group_entry->dirty = true;

        err = nfp_flower_lag_reset(&priv->nfp_lag);
        if (err)
            nfp_flower_cmsg_warn(priv->app,
                         "mem err in group reset msg\n");
        mutex_unlock(&priv->nfp_lag.lock);

        schedule_delayed_work(&priv->nfp_lag.work, 0);
    }

    return store_skb;
}

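/* Mark @group for removal from the firmware and schedule the sync work. */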
static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
                 struct nfp_fl_lag_group *group)
{
    group->to_remove = true;

    schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

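/* Called when a bond master is unregistered. Mark its group for removal
 * from the firmware and destruction in the driver, then schedule the sync
 * work to carry it out.
 */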
static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
                 struct net_device *master)
{
    struct nfp_fl_lag_group *group;
    struct nfp_flower_priv *priv;

    priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

    if (!netif_is_bond_master(master))
        return;

    mutex_lock(&lag->lock);
    group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
    if (!group) {
        mutex_unlock(&lag->lock);
        nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
             netdev_name(master));
        return;
    }

    group->to_remove = true;
    group->to_destroy = true;
    mutex_unlock(&lag->lock);

    schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

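/* Handle a CHANGEUPPER notification for a bond. Verify that every slave
 * is an nfp repr on the same app and that the bond mode can be offloaded,
 * then create or update the LAG group and schedule a sync. Otherwise
 * schedule removal of any previously offloaded group.
 */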
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
                 struct netdev_notifier_changeupper_info *info)
{
    struct net_device *upper = info->upper_dev, *iter_netdev;
    struct netdev_lag_upper_info *lag_upper_info;
    struct nfp_fl_lag_group *group;
    struct nfp_flower_priv *priv;
    unsigned int slave_count = 0;
    bool can_offload = true;
    struct nfp_repr *repr;

    if (!netif_is_lag_master(upper))
        return 0;

    priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

    rcu_read_lock();
    for_each_netdev_in_bond_rcu(upper, iter_netdev) {
        if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
            can_offload = false;
            break;
        }
        repr = netdev_priv(iter_netdev);

        /* Ensure all ports are created by the same app/on same card. */
        if (repr->app != priv->app) {
            can_offload = false;
            break;
        }

        slave_count++;
    }
    rcu_read_unlock();

    lag_upper_info = info->upper_info;

    /* Firmware supports active/backup and L3/L4 hash bonds. */
    if (lag_upper_info &&
        lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
        (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
         (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
          lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
          lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
        can_offload = false;
        nfp_flower_cmsg_warn(priv->app,
                     "Unable to offload tx_type %u hash %u\n",
                     lag_upper_info->tx_type,
                     lag_upper_info->hash_type);
    }

    mutex_lock(&lag->lock);
    group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

    if (slave_count == 0 || !can_offload) {
        /* Cannot offload the group - remove if previously offloaded. */
        if (group && group->offloaded)
            nfp_fl_lag_schedule_group_remove(lag, group);

        mutex_unlock(&lag->lock);
        return 0;
    }

    if (!group) {
        group = nfp_fl_lag_group_create(lag, upper);
        if (IS_ERR(group)) {
            mutex_unlock(&lag->lock);
            return PTR_ERR(group);
        }
    }

    group->dirty = true;
    group->slave_cnt = slave_count;

    /* Group may have been on queue for removal but is now offloadable. */
    group->to_remove = false;
    mutex_unlock(&lag->lock);

    schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
    return 0;
}

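/* Handle a CHANGELOWERSTATE notification for a bond port. Record the
 * link and tx-enabled state in the repr's LAG port flags and schedule a
 * sync so the active member list is refreshed.
 */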
static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
              struct netdev_notifier_changelowerstate_info *info)
{
    struct netdev_lag_lower_state_info *lag_lower_info;
    struct nfp_flower_repr_priv *repr_priv;
    struct nfp_flower_priv *priv;
    struct nfp_repr *repr;
    unsigned long *flags;

    if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
        return;

    lag_lower_info = info->lower_state_info;
    if (!lag_lower_info)
        return;

    priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
    repr = netdev_priv(netdev);

    /* Verify that the repr is associated with this app. */
    if (repr->app != priv->app)
        return;

    repr_priv = repr->app_priv;
    flags = &repr_priv->lag_port_flags;

    mutex_lock(&lag->lock);
    if (lag_lower_info->link_up)
        *flags |= NFP_PORT_LAG_LINK_UP;
    else
        *flags &= ~NFP_PORT_LAG_LINK_UP;

    if (lag_lower_info->tx_enabled)
        *flags |= NFP_PORT_LAG_TX_ENABLED;
    else
        *flags &= ~NFP_PORT_LAG_TX_ENABLED;

    *flags |= NFP_PORT_LAG_CHANGED;
    mutex_unlock(&lag->lock);

    schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

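/* Netdev notifier entry point for LAG offload. Dispatch bond related
 * events to the appropriate handler.
 */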
int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
                struct net_device *netdev,
                unsigned long event, void *ptr)
{
    struct nfp_fl_lag *lag = &priv->nfp_lag;
    int err;

    switch (event) {
    case NETDEV_CHANGEUPPER:
        err = nfp_fl_lag_changeupper_event(lag, ptr);
        if (err)
            return NOTIFY_BAD;
        return NOTIFY_OK;
    case NETDEV_CHANGELOWERSTATE:
        nfp_fl_lag_changels_event(lag, netdev, ptr);
        return NOTIFY_OK;
    case NETDEV_UNREGISTER:
        nfp_fl_lag_schedule_group_delete(lag, netdev);
        return NOTIFY_OK;
    }

    return NOTIFY_DONE;
}

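/* Send a LAG config message with the reset flag set, instructing the
 * firmware to drop its existing group config.
 */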
int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
    enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;

    lag->rst_cfg = true;
    return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}

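/* Initialise the LAG state: work item, group list, lock, group ID
 * allocator and retransmit queue.
 */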
void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
    INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
    INIT_LIST_HEAD(&lag->group_list);
    mutex_init(&lag->lock);
    ida_init(&lag->ida_handle);

    __skb_queue_head_init(&lag->retrans_skbs);

    /* 0 is a reserved batch version so increment to first valid value. */
    nfp_fl_increment_version(lag);
}

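/* Tear down the LAG state: cancel outstanding work, purge queued
 * retransmits and free all remaining group entries.
 */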
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
    struct nfp_fl_lag_group *entry, *storage;

    cancel_delayed_work_sync(&lag->work);

    __skb_queue_purge(&lag->retrans_skbs);

    /* Remove all groups. */
    mutex_lock(&lag->lock);
    list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
        list_del(&entry->list);
        kfree(entry);
    }
    mutex_unlock(&lag->lock);
    mutex_destroy(&lag->lock);
    ida_destroy(&lag->ida_handle);
}