/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
                                       struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *ndev,
                                  unsigned int txqueue);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
                                     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
                                   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
static int lio_vf_get_port_parent_id(struct net_device *dev,
                                     struct netdev_phys_item_id *ppid);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
    .ndo_open = lio_vf_rep_open,
    .ndo_stop = lio_vf_rep_stop,
    .ndo_start_xmit = lio_vf_rep_pkt_xmit,
    .ndo_tx_timeout = lio_vf_rep_tx_timeout,
    .ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
    .ndo_get_stats64 = lio_vf_rep_get_stats64,
    .ndo_change_mtu = lio_vf_rep_change_mtu,
    .ndo_get_port_parent_id = lio_vf_get_port_parent_id,
};

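/* Send a VF-representor control request to firmware over the soft
 * command path and wait for completion.  @req is copied into the
 * command's data buffer; on success, the payload following the
 * response header is copied back into @resp (when non-NULL).
 */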
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
                             void *req, int req_size,
                             void *resp, int resp_size)
{
    int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
    struct octeon_soft_command *sc = NULL;
    struct lio_vf_rep_resp *rep_resp;
    void *sc_req;
    int err;

    sc = (struct octeon_soft_command *)
        octeon_alloc_soft_command(oct, req_size,
                                  tot_resp_size, 0);
    if (!sc)
        return -ENOMEM;

    init_completion(&sc->complete);
    sc->sc_status = OCTEON_REQUEST_PENDING;

    sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
    memcpy(sc_req, req, req_size);

    rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
    memset(rep_resp, 0, tot_resp_size);
    WRITE_ONCE(rep_resp->status, 1);

    sc->iq_no = 0;
    octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                OPCODE_NIC_VF_REP_CMD, 0, 0, 0);

    err = octeon_send_soft_command(oct, sc);
    if (err == IQ_SEND_FAILED)
        goto free_buff;

    err = wait_for_sc_completion_timeout(oct, sc, 0);
    if (err)
        return err;

    err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
    if (err)
        dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
    else if (resp)
        memcpy(resp, (rep_resp + 1), resp_size);

    WRITE_ONCE(sc->caller_is_done, true);
    return err;

free_buff:
    octeon_free_soft_command(oct, sc);

    return err;
}

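/* ndo_open: request firmware to bring the representor's switch port up,
 * then mark the interface running and enable carrier and the TX queue.
 */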
static int
lio_vf_rep_open(struct net_device *ndev)
{
    struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
    struct lio_vf_rep_req rep_cfg;
    struct octeon_device *oct;
    int ret;

    oct = vf_rep->oct;

    memset(&rep_cfg, 0, sizeof(rep_cfg));
    rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
    rep_cfg.ifidx = vf_rep->ifidx;
    rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

    ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                       sizeof(rep_cfg), NULL, 0);
    if (ret) {
        dev_err(&oct->pci_dev->dev,
                "VF_REP open failed with err %d\n", ret);
        return -EIO;
    }

    atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
                                  LIO_IFSTATE_RUNNING));

    netif_carrier_on(ndev);
    netif_start_queue(ndev);

    return 0;
}

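/* ndo_stop: mirror of lio_vf_rep_open; tell firmware the port is down
 * and quiesce the TX queue and carrier.
 */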
static int
lio_vf_rep_stop(struct net_device *ndev)
{
    struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
    struct lio_vf_rep_req rep_cfg;
    struct octeon_device *oct;
    int ret;

    oct = vf_rep->oct;

    memset(&rep_cfg, 0, sizeof(rep_cfg));
    rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
    rep_cfg.ifidx = vf_rep->ifidx;
    rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

    ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                       sizeof(rep_cfg), NULL, 0);
    if (ret) {
        dev_err(&oct->pci_dev->dev,
                "VF_REP dev stop failed with err %d\n", ret);
        return -EIO;
    }

    atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
                                  ~LIO_IFSTATE_RUNNING));

    netif_tx_disable(ndev);
    netif_carrier_off(ndev);

    return 0;
}

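/* ndo_tx_timeout: transmit completions come back from firmware, so on a
 * watchdog timeout just refresh the trans time and restart the queue.
 */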
static void
lio_vf_rep_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
    netif_trans_update(ndev);
    netif_wake_queue(ndev);
}

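/* ndo_get_stats64: report the firmware-maintained counters for this
 * representor.
 */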
static void
lio_vf_rep_get_stats64(struct net_device *dev,
                       struct rtnl_link_stats64 *stats64)
{
    struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

    /* Swap tx and rx stats as VF rep is a switch port */
    stats64->tx_packets = vf_rep->stats.rx_packets;
    stats64->tx_bytes   = vf_rep->stats.rx_bytes;
    stats64->tx_dropped = vf_rep->stats.rx_dropped;

    stats64->rx_packets = vf_rep->stats.tx_packets;
    stats64->rx_bytes   = vf_rep->stats.tx_bytes;
    stats64->rx_dropped = vf_rep->stats.tx_dropped;
}

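/* ndo_change_mtu: the MTU is enforced by firmware, so forward the new
 * value and update the netdev only once firmware accepts it.
 */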
static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
    struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
    struct lio_vf_rep_req rep_cfg;
    struct octeon_device *oct;
    int ret;

    oct = vf_rep->oct;

    memset(&rep_cfg, 0, sizeof(rep_cfg));
    rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
    rep_cfg.ifidx = vf_rep->ifidx;
    rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

    ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                       sizeof(rep_cfg), NULL, 0);
    if (ret) {
        dev_err(&oct->pci_dev->dev,
                "Change MTU failed with err %d\n", ret);
        return -EIO;
    }

    ndev->mtu = new_mtu;

    return 0;
}

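/* ndo_get_phys_port_name: expose the conventional "pf<N>vf<M>" name so
 * switchdev-aware tooling can identify the representor's port.
 */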
static int
lio_vf_rep_phys_port_name(struct net_device *dev,
                          char *buf, size_t len)
{
    struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
    struct octeon_device *oct = vf_rep->oct;
    int ret;

    ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
                   vf_rep->ifidx - oct->pf_num * 64 - 1);
    if (ret >= len)
        return -EOPNOTSUPP;

    return 0;
}

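/* Map a firmware interface index to its representor netdev.  Each PF
 * owns a block of 64 ifidx values, with index 0 of the block reserved
 * for the PF itself, so only indices inside this PF's block resolve.
 */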
static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
    int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
    int vfid_mask = max_vfs - 1;

    if (ifidx <= oct->pf_num * max_vfs ||
        ifidx >= oct->pf_num * max_vfs + max_vfs)
        return NULL;

    /* ifidx 1-63 for PF0 VFs
     * ifidx 65-127 for PF1 VFs
     */
    vf_id = (ifidx & vfid_mask) - 1;

    return oct->vf_rep_list.ndev[vf_id];
}

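/* Build the receive skb from the DROQ page buffer.  Small packets are
 * copied entirely into the linear area; larger ones get MIN_SKB_SIZE
 * bytes copied and the remainder attached as a page fragment to avoid
 * copying the full payload.
 */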
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
                       struct sk_buff *skb,
                       int len)
{
    if (likely(len > MIN_SKB_SIZE)) {
        struct octeon_skb_page_info *pg_info;
        unsigned char *va;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        if (pg_info->page) {
            va = page_address(pg_info->page) +
                pg_info->page_offset;
            memcpy(skb->data, va, MIN_SKB_SIZE);
            skb_put(skb, MIN_SKB_SIZE);
        }

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        pg_info->page,
                        pg_info->page_offset + MIN_SKB_SIZE,
                        len - MIN_SKB_SIZE,
                        LIO_RXBUFFER_SZ);
    } else {
        struct octeon_skb_page_info *pg_info =
            ((struct octeon_skb_page_info *)(skb->cb));

        skb_copy_to_linear_data(skb, page_address(pg_info->page) +
                                pg_info->page_offset, len);
        skb_put(skb, len);
        put_page(pg_info->page);
    }
}

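/* Dispatch handler for OPCODE_NIC_VF_REP_PKT: packets firmware steers
 * to a representor land here and are injected into the stack via
 * netif_rx() on the representor netdev.  Anything that cannot be
 * delivered is dropped and its buffers freed.
 */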
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
    struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
    struct lio_vf_rep_desc *vf_rep;
    struct net_device *vf_ndev;
    struct octeon_device *oct;
    union octeon_rh *rh;
    struct sk_buff *skb;
    int i, ifidx;

    oct = lio_get_device(recv_pkt->octeon_id);
    if (!oct)
        goto free_buffers;

    skb = recv_pkt->buffer_ptr[0];
    rh = &recv_pkt->rh;
    ifidx = rh->r.ossp;

    vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
    if (!vf_ndev)
        goto free_buffers;

    vf_rep = netdev_priv(vf_ndev);
    if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
        recv_pkt->buffer_count > 1)
        goto free_buffers;

    skb->dev = vf_ndev;

    /* Multiple buffers are not used for vf_rep packets.
     * So just buffer_size[0] is valid.
     */
    lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

    skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
    skb->protocol = eth_type_trans(skb, skb->dev);
    skb->ip_summed = CHECKSUM_NONE;

    netif_rx(skb);

    octeon_free_recv_info(recv_info);

    return 0;

free_buffers:
    for (i = 0; i < recv_pkt->buffer_count; i++)
        recv_buffer_free(recv_pkt->buffer_ptr[i]);

    octeon_free_recv_info(recv_info);

    return 0;
}

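/* Completion callback for transmitted packets: unmap and free the skb
 * and soft command, then wake the queue if the IQ has space again.
 */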
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
                                u32 status, void *buf)
{
    struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
    struct sk_buff *skb = sc->ctxptr;
    struct net_device *ndev = skb->dev;
    u32 iq_no;

    dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                     sc->datasize, DMA_TO_DEVICE);
    dev_kfree_skb_any(skb);
    iq_no = sc->iq_no;
    octeon_free_soft_command(oct, sc);

    if (octnet_iq_is_full(oct, iq_no))
        return;

    if (netif_queue_stopped(ndev))
        netif_wake_queue(ndev);
}

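/* ndo_start_xmit: representor TX is tunnelled to firmware as a soft
 * command on the parent PF's instruction queue; firmware forwards the
 * frame to the corresponding VF.  Only linear (non-fragmented) skbs
 * are supported on this path.
 */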
static netdev_tx_t
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
    struct net_device *parent_ndev = vf_rep->parent_ndev;
    struct octeon_device *oct = vf_rep->oct;
    struct octeon_instr_pki_ih3 *pki_ih3;
    struct octeon_soft_command *sc;
    struct lio *parent_lio;
    int status;

    parent_lio = GET_LIO(parent_ndev);

    if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
        skb->len <= 0)
        goto xmit_failed;

    if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
        dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
        netif_stop_queue(ndev);
        return NETDEV_TX_BUSY;
    }

    sc = (struct octeon_soft_command *)
        octeon_alloc_soft_command(oct, 0, 16, 0);
    if (!sc) {
        dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
        goto xmit_failed;
    }

    /* Multiple buffers are not used for vf_rep packets. */
    if (skb_shinfo(skb)->nr_frags != 0) {
        dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
        octeon_free_soft_command(oct, sc);
        goto xmit_failed;
    }

    sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
                                 skb->data, skb->len, DMA_TO_DEVICE);
    if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
        dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
        octeon_free_soft_command(oct, sc);
        goto xmit_failed;
    }

    sc->virtdptr = skb->data;
    sc->datasize = skb->len;
    sc->ctxptr = skb;
    sc->iq_no = parent_lio->txq;

    octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
                                vf_rep->ifidx, 0, 0);
    pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
    pki_ih3->tagtype = ORDERED_TAG;

    sc->callback = lio_vf_rep_packet_sent_callback;
    sc->callback_arg = sc;

    status = octeon_send_soft_command(oct, sc);
    if (status == IQ_SEND_FAILED) {
        dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                         sc->datasize, DMA_TO_DEVICE);
        octeon_free_soft_command(oct, sc);
        goto xmit_failed;
    }

    if (status == IQ_SEND_STOP)
        netif_stop_queue(ndev);

    netif_trans_update(ndev);

    return NETDEV_TX_OK;

xmit_failed:
    dev_kfree_skb_any(skb);

    return NETDEV_TX_OK;
}

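/* ndo_get_port_parent_id: all representors of a PF report the parent's
 * MAC address as the port parent ID, grouping them under one switch.
 */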
static int lio_vf_get_port_parent_id(struct net_device *dev,
                                     struct netdev_phys_item_id *ppid)
{
    struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
    struct net_device *parent_ndev = vf_rep->parent_ndev;
    struct lio *lio = GET_LIO(parent_ndev);

    ppid->id_len = ETH_ALEN;
    ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);

    return 0;
}

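/* Delayed work that polls firmware for per-representor statistics and
 * caches them for ndo_get_stats64; it reschedules itself until the
 * representor is destroyed.
 */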
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
    struct cavium_wk *wk = (struct cavium_wk *)work;
    struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
    struct lio_vf_rep_stats stats;
    struct lio_vf_rep_req rep_cfg;
    struct octeon_device *oct;
    int ret;

    oct = vf_rep->oct;

    memset(&rep_cfg, 0, sizeof(rep_cfg));
    rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
    rep_cfg.ifidx = vf_rep->ifidx;

    ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
                                       &stats, sizeof(stats));
    if (!ret) {
        octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
        memcpy(&vf_rep->stats, &stats, sizeof(stats));
    }

    schedule_delayed_work(&vf_rep->stats_wk.work,
                          msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

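/* Create one representor netdev per allocated VF and register the
 * dispatch handler for representor packets.  Does nothing unless the
 * devlink eswitch mode is switchdev and SR-IOV is enabled.
 */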
int
lio_vf_rep_create(struct octeon_device *oct)
{
    struct lio_vf_rep_desc *vf_rep;
    struct net_device *ndev;
    int i, num_vfs;

    if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
        return 0;

    if (!oct->sriov_info.sriov_enabled)
        return 0;

    num_vfs = oct->sriov_info.num_vfs_alloced;

    oct->vf_rep_list.num_vfs = 0;
    for (i = 0; i < num_vfs; i++) {
        ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));
        if (!ndev) {
            dev_err(&oct->pci_dev->dev,
                    "VF rep device %d creation failed\n", i);
            goto cleanup;
        }

        ndev->min_mtu = LIO_MIN_MTU_SIZE;
        ndev->max_mtu = LIO_MAX_MTU_SIZE;
        ndev->netdev_ops = &lio_vf_rep_ndev_ops;

        vf_rep = netdev_priv(ndev);
        memset(vf_rep, 0, sizeof(*vf_rep));

        vf_rep->ndev = ndev;
        vf_rep->oct = oct;
        vf_rep->parent_ndev = oct->props[0].netdev;
        vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

        eth_hw_addr_random(ndev);

        if (register_netdev(ndev)) {
            dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");
            free_netdev(ndev);
            goto cleanup;
        }

        netif_carrier_off(ndev);

        INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
                          lio_vf_rep_fetch_stats);
        vf_rep->stats_wk.ctxptr = (void *)vf_rep;
        schedule_delayed_work(&vf_rep->stats_wk.work,
                              msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
        oct->vf_rep_list.num_vfs++;
        oct->vf_rep_list.ndev[i] = ndev;
    }

    if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
                                    OPCODE_NIC_VF_REP_PKT,
                                    lio_vf_rep_pkt_recv, oct)) {
        dev_err(&oct->pci_dev->dev, "VF rep dispatch func registration failed\n");
        goto cleanup;
    }

    return 0;

cleanup:
    for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
        ndev = oct->vf_rep_list.ndev[i];
        oct->vf_rep_list.ndev[i] = NULL;
        if (ndev) {
            vf_rep = netdev_priv(ndev);
            cancel_delayed_work_sync(&vf_rep->stats_wk.work);
            unregister_netdev(ndev);
            free_netdev(ndev);
        }
    }

    oct->vf_rep_list.num_vfs = 0;

    return -1;
}

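/* Tear down all representor netdevs created by lio_vf_rep_create(). */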
void
lio_vf_rep_destroy(struct octeon_device *oct)
{
    struct lio_vf_rep_desc *vf_rep;
    struct net_device *ndev;
    int i;

    if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
        return;

    if (!oct->sriov_info.sriov_enabled)
        return;

    for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
        ndev = oct->vf_rep_list.ndev[i];
        oct->vf_rep_list.ndev[i] = NULL;
        if (ndev) {
            vf_rep = netdev_priv(ndev);
            cancel_delayed_work_sync(&vf_rep->stats_wk.work);
            netif_tx_disable(ndev);
            netif_carrier_off(ndev);

            unregister_netdev(ndev);
            free_netdev(ndev);
        }
    }

    oct->vf_rep_list.num_vfs = 0;
}

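/* Netdev notifier: when a representor netdev is registered or renamed,
 * push the new interface name to firmware so the device name stays in
 * sync on both sides.
 */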
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
                        unsigned long event, void *ptr)
{
    struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
    struct lio_vf_rep_desc *vf_rep;
    struct lio_vf_rep_req rep_cfg;
    struct octeon_device *oct;
    int ret;

    switch (event) {
    case NETDEV_REGISTER:
    case NETDEV_CHANGENAME:
        break;

    default:
        return NOTIFY_DONE;
    }

    if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
        return NOTIFY_DONE;

    vf_rep = netdev_priv(ndev);
    oct = vf_rep->oct;

    /* Reject names that would not fit (NUL included) in rep_name */
    if (strlen(ndev->name) >= LIO_IF_NAME_SIZE) {
        dev_err(&oct->pci_dev->dev,
                "Device name change sync failed as the size is >= %d\n",
                LIO_IF_NAME_SIZE);
        return NOTIFY_DONE;
    }

    memset(&rep_cfg, 0, sizeof(rep_cfg));
    rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
    rep_cfg.ifidx = vf_rep->ifidx;
    strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);

    ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                       sizeof(rep_cfg), NULL, 0);
    if (ret)
        dev_err(&oct->pci_dev->dev,
                "vf_rep netdev name change failed with err %d\n", ret);

    return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
    .notifier_call = lio_vf_rep_netdev_event,
};

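/* Module-level hooks: register/unregister the netdev notifier used for
 * the name synchronization above.
 */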
int
lio_vf_rep_modinit(void)
{
    if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
        pr_err("netdev notifier registration failed\n");
        return -EFAULT;
    }

    return 0;
}

void
lio_vf_rep_modexit(void)
{
    if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
        pr_err("netdev notifier unregister failed\n");
}