Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
0004  * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
0005  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
0006  */
0007 
0008 #include <linux/kernel.h>
0009 #include <linux/module.h>
0010 #include <linux/pci.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/sched.h>
0013 #include <linux/wait.h>
0014 #include <linux/spinlock.h>
0015 #include <linux/sort.h>
0016 #include <linux/random.h>
0017 #include <linux/netdevice.h>
0018 #include <linux/skbuff.h>
0019 #include <linux/socket.h>
0020 #include <linux/etherdevice.h>
0021 #include <linux/ethtool.h>
0022 #include <linux/if_ether.h>
0023 #include <linux/if_vlan.h>
0024 #include <linux/if_bridge.h>
0025 #include <linux/bitops.h>
0026 #include <linux/ctype.h>
0027 #include <linux/workqueue.h>
0028 #include <net/switchdev.h>
0029 #include <net/rtnetlink.h>
0030 #include <net/netevent.h>
0031 #include <net/arp.h>
0032 #include <net/fib_rules.h>
0033 #include <net/fib_notifier.h>
0034 #include <linux/io-64-nonatomic-lo-hi.h>
0035 #include <generated/utsrelease.h>
0036 
0037 #include "rocker_hw.h"
0038 #include "rocker.h"
0039 #include "rocker_tlv.h"
0040 
/* Driver name, used for PCI driver registration and IRQ naming. */
static const char rocker_driver_name[] = "rocker";

/* The rocker switch device sits behind Red Hat's PCI vendor ID. */
static const struct pci_device_id rocker_pci_id_table[] = {
    {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
    {0, }
};
0047 
/* Per-request wait context: a caller sleeps on @wait until the IRQ
 * handler marks the request @done.
 */
struct rocker_wait {
    wait_queue_head_t wait;
    bool done;      /* set by rocker_wait_wake_up() when hw completed */
    bool nowait;    /* caller won't wait; IRQ handler clears the desc */
};
0053 
0054 static void rocker_wait_reset(struct rocker_wait *wait)
0055 {
0056     wait->done = false;
0057     wait->nowait = false;
0058 }
0059 
0060 static void rocker_wait_init(struct rocker_wait *wait)
0061 {
0062     init_waitqueue_head(&wait->wait);
0063     rocker_wait_reset(wait);
0064 }
0065 
0066 static struct rocker_wait *rocker_wait_create(void)
0067 {
0068     struct rocker_wait *wait;
0069 
0070     wait = kzalloc(sizeof(*wait), GFP_KERNEL);
0071     if (!wait)
0072         return NULL;
0073     return wait;
0074 }
0075 
/* Free a wait context created by rocker_wait_create(). */
static void rocker_wait_destroy(struct rocker_wait *wait)
{
    kfree(wait);
}
0080 
0081 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
0082                       unsigned long timeout)
0083 {
0084     wait_event_timeout(wait->wait, wait->done, HZ / 10);
0085     if (!wait->done)
0086         return false;
0087     return true;
0088 }
0089 
/* Mark a request complete and wake any thread sleeping on it.
 * Called from IRQ context (see rocker_cmd_irq_handler()).
 */
static void rocker_wait_wake_up(struct rocker_wait *wait)
{
    wait->done = true;
    wake_up(&wait->wait);
}
0095 
/* Translate a rocker MSI-X slot index into the Linux IRQ vector number. */
static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
    return rocker->msix_entries[vector].vector;
}
0100 
0101 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
0102 {
0103     return rocker_msix_vector(rocker_port->rocker,
0104                   ROCKER_MSIX_VEC_TX(rocker_port->port_number));
0105 }
0106 
0107 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
0108 {
0109     return rocker_msix_vector(rocker_port->rocker,
0110                   ROCKER_MSIX_VEC_RX(rocker_port->port_number));
0111 }
0112 
/* MMIO register accessors. @reg is the suffix of a ROCKER_* register
 * offset constant, e.g. rocker_read32(r, TEST_REG) reads ROCKER_TEST_REG.
 */
#define rocker_write32(rocker, reg, val)    \
    writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)  \
    readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)    \
    writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)  \
    readq((rocker)->hw_addr + (ROCKER_ ## reg))
0121 
0122 /*****************************
0123  * HW basic testing functions
0124  *****************************/
0125 
/* Sanity-check MMIO access: the device's TEST_REG/TEST_REG64 registers
 * read back twice the value last written to them (see the "* 2" checks
 * below). Returns 0 on success, -EIO on mismatch.
 */
static int rocker_reg_test(const struct rocker *rocker)
{
    const struct pci_dev *pdev = rocker->pdev;
    u64 test_reg;
    u64 rnd;

    /* Drop the top bit so that doubling cannot overflow 32 bits. */
    rnd = prandom_u32();
    rnd >>= 1;
    rocker_write32(rocker, TEST_REG, rnd);
    test_reg = rocker_read32(rocker, TEST_REG);
    if (test_reg != rnd * 2) {
        dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
            test_reg, rnd * 2);
        return -EIO;
    }

    /* Build a random value with the top bit clear (<<= 31 then OR in
     * 32 more bits) so doubling cannot overflow 64 bits either.
     */
    rnd = prandom_u32();
    rnd <<= 31;
    rnd |= prandom_u32();
    rocker_write64(rocker, TEST_REG64, rnd);
    test_reg = rocker_read64(rocker, TEST_REG64);
    if (test_reg != rnd * 2) {
        dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
            test_reg, rnd * 2);
        return -EIO;
    }

    return 0;
}
0155 
0156 static int rocker_dma_test_one(const struct rocker *rocker,
0157                    struct rocker_wait *wait, u32 test_type,
0158                    dma_addr_t dma_handle, const unsigned char *buf,
0159                    const unsigned char *expect, size_t size)
0160 {
0161     const struct pci_dev *pdev = rocker->pdev;
0162     int i;
0163 
0164     rocker_wait_reset(wait);
0165     rocker_write32(rocker, TEST_DMA_CTRL, test_type);
0166 
0167     if (!rocker_wait_event_timeout(wait, HZ / 10)) {
0168         dev_err(&pdev->dev, "no interrupt received within a timeout\n");
0169         return -EIO;
0170     }
0171 
0172     for (i = 0; i < size; i++) {
0173         if (buf[i] != expect[i]) {
0174             dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
0175                 buf[i], i, expect[i]);
0176             return -EIO;
0177         }
0178     }
0179     return 0;
0180 }
0181 
/* DMA self-test uses a 4-page buffer and a fixed fill byte. */
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
0184 
0185 static int rocker_dma_test_offset(const struct rocker *rocker,
0186                   struct rocker_wait *wait, int offset)
0187 {
0188     struct pci_dev *pdev = rocker->pdev;
0189     unsigned char *alloc;
0190     unsigned char *buf;
0191     unsigned char *expect;
0192     dma_addr_t dma_handle;
0193     int i;
0194     int err;
0195 
0196     alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
0197             GFP_KERNEL | GFP_DMA);
0198     if (!alloc)
0199         return -ENOMEM;
0200     buf = alloc + offset;
0201     expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
0202 
0203     dma_handle = dma_map_single(&pdev->dev, buf, ROCKER_TEST_DMA_BUF_SIZE,
0204                     DMA_BIDIRECTIONAL);
0205     if (dma_mapping_error(&pdev->dev, dma_handle)) {
0206         err = -EIO;
0207         goto free_alloc;
0208     }
0209 
0210     rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
0211     rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
0212 
0213     memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
0214     err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
0215                   dma_handle, buf, expect,
0216                   ROCKER_TEST_DMA_BUF_SIZE);
0217     if (err)
0218         goto unmap;
0219 
0220     memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
0221     err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
0222                   dma_handle, buf, expect,
0223                   ROCKER_TEST_DMA_BUF_SIZE);
0224     if (err)
0225         goto unmap;
0226 
0227     prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
0228     for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
0229         expect[i] = ~buf[i];
0230     err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
0231                   dma_handle, buf, expect,
0232                   ROCKER_TEST_DMA_BUF_SIZE);
0233     if (err)
0234         goto unmap;
0235 
0236 unmap:
0237     dma_unmap_single(&pdev->dev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
0238              DMA_BIDIRECTIONAL);
0239 free_alloc:
0240     kfree(alloc);
0241 
0242     return err;
0243 }
0244 
/* Exercise the DMA engine at every buffer misalignment from 0 to 7
 * bytes; stop at the first failure.
 */
static int rocker_dma_test(const struct rocker *rocker,
               struct rocker_wait *wait)
{
    int offset;

    for (offset = 0; offset < 8; offset++) {
        int err = rocker_dma_test_offset(rocker, wait, offset);

        if (err)
            return err;
    }
    return 0;
}
0258 
/* IRQ handler used only during the basic hw self-test; @dev_id is the
 * on-stack rocker_wait from rocker_basic_hw_test().
 */
static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
    struct rocker_wait *wait = dev_id;

    rocker_wait_wake_up(wait);

    return IRQ_HANDLED;
}
0267 
0268 static int rocker_basic_hw_test(const struct rocker *rocker)
0269 {
0270     const struct pci_dev *pdev = rocker->pdev;
0271     struct rocker_wait wait;
0272     int err;
0273 
0274     err = rocker_reg_test(rocker);
0275     if (err) {
0276         dev_err(&pdev->dev, "reg test failed\n");
0277         return err;
0278     }
0279 
0280     err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
0281               rocker_test_irq_handler, 0,
0282               rocker_driver_name, &wait);
0283     if (err) {
0284         dev_err(&pdev->dev, "cannot assign test irq\n");
0285         return err;
0286     }
0287 
0288     rocker_wait_init(&wait);
0289     rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
0290 
0291     if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
0292         dev_err(&pdev->dev, "no interrupt received within a timeout\n");
0293         err = -EIO;
0294         goto free_irq;
0295     }
0296 
0297     err = rocker_dma_test(rocker, &wait);
0298     if (err)
0299         dev_err(&pdev->dev, "dma test failed\n");
0300 
0301 free_irq:
0302     free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
0303     return err;
0304 }
0305 
0306 /******************************************
0307  * DMA rings and descriptors manipulations
0308  ******************************************/
0309 
0310 static u32 __pos_inc(u32 pos, size_t limit)
0311 {
0312     return ++pos == limit ? 0 : pos;
0313 }
0314 
/* Translate the device's completion error code (with the generation
 * bit masked off) into a Linux errno. Unknown codes map to -EINVAL.
 */
static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
    int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

    switch (err) {
    case ROCKER_OK:
        return 0;
    case -ROCKER_ENOENT:
        return -ENOENT;
    case -ROCKER_ENXIO:
        return -ENXIO;
    case -ROCKER_ENOMEM:
        return -ENOMEM;
    case -ROCKER_EEXIST:
        return -EEXIST;
    case -ROCKER_EINVAL:
        return -EINVAL;
    case -ROCKER_EMSGSIZE:
        return -EMSGSIZE;
    case -ROCKER_ENOTSUP:
        return -EOPNOTSUPP;
    case -ROCKER_ENOBUFS:
        return -ENOBUFS;
    }

    return -EINVAL;
}
0342 
/* Clear the descriptor's generation bit, handing it back to the hw. */
static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
    desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}
0347 
0348 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
0349 {
0350     u32 comp_err = desc_info->desc->comp_err;
0351 
0352     return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
0353 }
0354 
/* Retrieve the driver-private pointer stashed in the descriptor's
 * 64-bit cookie field.
 */
static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
    return (void *)(uintptr_t)desc_info->desc->cookie;
}
0360 
/* Stash a driver-private pointer in the descriptor's cookie field. */
static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
                       void *ptr)
{
    desc_info->desc->cookie = (uintptr_t) ptr;
}
0366 
0367 static struct rocker_desc_info *
0368 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
0369 {
0370     struct rocker_desc_info *desc_info;
0371     u32 head = __pos_inc(info->head, info->size);
0372 
0373     desc_info = &info->desc_info[info->head];
0374     if (head == info->tail)
0375         return NULL; /* ring full */
0376     desc_info->tlv_size = 0;
0377     return desc_info;
0378 }
0379 
/* Publish the CPU-side buffer and TLV sizes into the hw descriptor. */
static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
    desc_info->desc->buf_size = desc_info->data_size;
    desc_info->desc->tlv_size = desc_info->tlv_size;
}
0385 
/* Commit @desc_info and advance the ring head past it, telling the hw
 * (via the DMA_DESC_HEAD register) that the descriptor is ready.
 * Caller must have obtained the descriptor via rocker_desc_head_get(),
 * hence the BUG_ON for an impossible full-ring state.
 */
static void rocker_desc_head_set(const struct rocker *rocker,
                 struct rocker_dma_ring_info *info,
                 const struct rocker_desc_info *desc_info)
{
    u32 head = __pos_inc(info->head, info->size);

    BUG_ON(head == info->tail);
    rocker_desc_commit(desc_info);
    info->head = head;
    rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}
0397 
/* Consume the next completed descriptor at the ring tail, or return
 * NULL if the ring is empty or the hw hasn't flipped the descriptor's
 * generation bit yet. On success the tail is advanced and the
 * hw-reported tlv_size is copied into the desc_info.
 */
static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
    struct rocker_desc_info *desc_info;

    if (info->tail == info->head)
        return NULL; /* nothing to be done between head and tail */
    desc_info = &info->desc_info[info->tail];
    if (!rocker_desc_gen(desc_info))
        return NULL; /* gen bit not set, desc is not ready yet */
    info->tail = __pos_inc(info->tail, info->size);
    desc_info->tlv_size = desc_info->desc->tlv_size;
    return desc_info;
}
0412 
0413 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
0414                     const struct rocker_dma_ring_info *info,
0415                     u32 credits)
0416 {
0417     if (credits)
0418         rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
0419 }
0420 
/* Round a requested ring size up to a power of two and clamp it to
 * the [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX] range.
 */
static unsigned long rocker_dma_ring_size_fix(size_t size)
{
    return max(ROCKER_DMA_SIZE_MIN,
           min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
0426 
/* Allocate a descriptor ring of @size entries of ring @type, wire each
 * desc_info to its coherent hw descriptor, and program the ring's
 * address/size into the device. Returns 0 or -ENOMEM.
 */
static int rocker_dma_ring_create(const struct rocker *rocker,
                  unsigned int type,
                  size_t size,
                  struct rocker_dma_ring_info *info)
{
    int i;

    /* Callers must pass a size already normalized by
     * rocker_dma_ring_size_fix().
     */
    BUG_ON(size != rocker_dma_ring_size_fix(size));
    info->size = size;
    info->type = type;
    info->head = 0;
    info->tail = 0;
    info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
                  GFP_KERNEL);
    if (!info->desc_info)
        return -ENOMEM;

    /* Hw descriptors live in coherent DMA memory shared with device. */
    info->desc = dma_alloc_coherent(&rocker->pdev->dev,
                    info->size * sizeof(*info->desc),
                    &info->mapaddr, GFP_KERNEL);
    if (!info->desc) {
        kfree(info->desc_info);
        return -ENOMEM;
    }

    for (i = 0; i < info->size; i++)
        info->desc_info[i].desc = &info->desc[i];

    /* Reset the ring, then hand its base address and size to the hw. */
    rocker_write32(rocker, DMA_DESC_CTRL(info->type),
               ROCKER_DMA_DESC_CTRL_RESET);
    rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
    rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

    return 0;
}
0462 
/* Detach the ring from the hw (clear its address register) and free
 * the coherent descriptor memory and desc_info array.
 */
static void rocker_dma_ring_destroy(const struct rocker *rocker,
                    const struct rocker_dma_ring_info *info)
{
    rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

    dma_free_coherent(&rocker->pdev->dev,
              info->size * sizeof(struct rocker_desc), info->desc,
              info->mapaddr);
    kfree(info->desc_info);
}
0473 
/* Hand every descriptor of a consumer ring (e.g. event/rx) to the hw.
 * Must be called on a freshly created ring (head == tail == 0).
 */
static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
                         struct rocker_dma_ring_info *info)
{
    int i;

    BUG_ON(info->head || info->tail);

    /* When ring is consumer, we need to advance head for each desc.
     * That tells hw that the desc is ready to be used by it.
     */
    for (i = 0; i < info->size - 1; i++)
        rocker_desc_head_set(rocker, info, &info->desc_info[i]);
    /* The last slot cannot be headed past (head would equal tail, i.e.
     * "full"); commit its sizes without advancing the head.
     */
    rocker_desc_commit(&info->desc_info[i]);
}
0488 
/* Allocate and DMA-map a @buf_size data buffer for every descriptor in
 * the ring, recording the mapping in both the desc_info (for unmap)
 * and the hw descriptor (for the device). On failure all buffers
 * allocated so far are unmapped and freed.
 * Returns 0, -ENOMEM or -EIO.
 */
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
                      const struct rocker_dma_ring_info *info,
                      int direction, size_t buf_size)
{
    struct pci_dev *pdev = rocker->pdev;
    int i;
    int err;

    for (i = 0; i < info->size; i++) {
        struct rocker_desc_info *desc_info = &info->desc_info[i];
        struct rocker_desc *desc = &info->desc[i];
        dma_addr_t dma_handle;
        char *buf;

        buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
        if (!buf) {
            err = -ENOMEM;
            goto rollback;
        }

        dma_handle = dma_map_single(&pdev->dev, buf, buf_size,
                        direction);
        if (dma_mapping_error(&pdev->dev, dma_handle)) {
            kfree(buf);
            err = -EIO;
            goto rollback;
        }

        desc_info->data = buf;
        desc_info->data_size = buf_size;
        dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

        desc->buf_addr = dma_handle;
        desc->buf_size = buf_size;
    }
    return 0;

rollback:
    /* Undo the mappings/allocations for descriptors [0, i). */
    for (i--; i >= 0; i--) {
        const struct rocker_desc_info *desc_info = &info->desc_info[i];

        dma_unmap_single(&pdev->dev,
                 dma_unmap_addr(desc_info, mapaddr),
                 desc_info->data_size, direction);
        kfree(desc_info->data);
    }
    return err;
}
0537 
0538 static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
0539                       const struct rocker_dma_ring_info *info,
0540                       int direction)
0541 {
0542     struct pci_dev *pdev = rocker->pdev;
0543     int i;
0544 
0545     for (i = 0; i < info->size; i++) {
0546         const struct rocker_desc_info *desc_info = &info->desc_info[i];
0547         struct rocker_desc *desc = &info->desc[i];
0548 
0549         desc->buf_addr = 0;
0550         desc->buf_size = 0;
0551         dma_unmap_single(&pdev->dev,
0552                  dma_unmap_addr(desc_info, mapaddr),
0553                  desc_info->data_size, direction);
0554         kfree(desc_info->data);
0555     }
0556 }
0557 
0558 static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
0559 {
0560     struct rocker_wait *wait;
0561 
0562     wait = rocker_wait_create();
0563     if (!wait)
0564         return -ENOMEM;
0565     rocker_desc_cookie_ptr_set(desc_info, wait);
0566     return 0;
0567 }
0568 
/* Free the wait context stashed in a cmd descriptor's cookie. */
static void
rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
{
    rocker_wait_destroy(rocker_desc_cookie_ptr_get(desc_info));
}
0576 
0577 static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
0578 {
0579     const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
0580     int i;
0581     int err;
0582 
0583     for (i = 0; i < cmd_ring->size; i++) {
0584         err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
0585         if (err)
0586             goto rollback;
0587     }
0588     return 0;
0589 
0590 rollback:
0591     for (i--; i >= 0; i--)
0592         rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
0593     return err;
0594 }
0595 
0596 static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
0597 {
0598     const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
0599     int i;
0600 
0601     for (i = 0; i < cmd_ring->size; i++)
0602         rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
0603 }
0604 
/* Bring up the device-global DMA rings: the command ring (with per-desc
 * wait contexts) and the event ring (handed to the hw as producer).
 * On error, unwinds in reverse order via the goto ladder.
 * Returns 0 or a negative errno.
 */
static int rocker_dma_rings_init(struct rocker *rocker)
{
    const struct pci_dev *pdev = rocker->pdev;
    int err;

    err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
                     ROCKER_DMA_CMD_DEFAULT_SIZE,
                     &rocker->cmd_ring);
    if (err) {
        dev_err(&pdev->dev, "failed to create command dma ring\n");
        return err;
    }

    spin_lock_init(&rocker->cmd_ring_lock);

    err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
                     DMA_BIDIRECTIONAL, PAGE_SIZE);
    if (err) {
        dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
        goto err_dma_cmd_ring_bufs_alloc;
    }

    err = rocker_dma_cmd_ring_waits_alloc(rocker);
    if (err) {
        dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
        goto err_dma_cmd_ring_waits_alloc;
    }

    err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
                     ROCKER_DMA_EVENT_DEFAULT_SIZE,
                     &rocker->event_ring);
    if (err) {
        dev_err(&pdev->dev, "failed to create event dma ring\n");
        goto err_dma_event_ring_create;
    }

    /* Event buffers are only written by the device. */
    err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
                     DMA_FROM_DEVICE, PAGE_SIZE);
    if (err) {
        dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
        goto err_dma_event_ring_bufs_alloc;
    }
    /* Hand the whole event ring to the hw so it can post events. */
    rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
    return 0;

err_dma_event_ring_bufs_alloc:
    rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
    rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
    rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                  DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
    rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
    return err;
}
0661 
0662 static void rocker_dma_rings_fini(struct rocker *rocker)
0663 {
0664     rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
0665                   DMA_BIDIRECTIONAL);
0666     rocker_dma_ring_destroy(rocker, &rocker->event_ring);
0667     rocker_dma_cmd_ring_waits_free(rocker);
0668     rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
0669                   DMA_BIDIRECTIONAL);
0670     rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
0671 }
0672 
/* DMA-map an rx skb's data and describe the fragment to the hw via
 * RX_FRAG_ADDR/RX_FRAG_MAX_LEN TLVs. On TLV overflow the mapping is
 * undone and tlv_size is zeroed so the hw skips this descriptor.
 * Returns 0, -EIO (mapping failure) or -EMSGSIZE.
 */
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
                      struct rocker_desc_info *desc_info,
                      struct sk_buff *skb, size_t buf_len)
{
    const struct rocker *rocker = rocker_port->rocker;
    struct pci_dev *pdev = rocker->pdev;
    dma_addr_t dma_handle;

    dma_handle = dma_map_single(&pdev->dev, skb->data, buf_len,
                    DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev, dma_handle))
        return -EIO;
    if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
        goto tlv_put_failure;
    if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
        goto tlv_put_failure;
    return 0;

tlv_put_failure:
    dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_FROM_DEVICE);
    desc_info->tlv_size = 0;
    return -EMSGSIZE;
}
0696 
0697 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
0698 {
0699     return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
0700 }
0701 
/* Allocate an rx skb for a descriptor, map it for DMA and remember the
 * skb in the descriptor cookie. Returns 0 or a negative errno.
 */
static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
                    struct rocker_desc_info *desc_info)
{
    struct net_device *dev = rocker_port->dev;
    struct sk_buff *skb;
    size_t buf_len = rocker_port_rx_buf_len(rocker_port);
    int err;

    /* Ensure that hw will see tlv_size zero in case of an error.
     * That tells hw to use another descriptor.
     */
    rocker_desc_cookie_ptr_set(desc_info, NULL);
    desc_info->tlv_size = 0;

    skb = netdev_alloc_skb_ip_align(dev, buf_len);
    if (!skb)
        return -ENOMEM;
    err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
    if (err) {
        dev_kfree_skb_any(skb);
        return err;
    }
    /* Cookie set last: a non-NULL cookie means a fully mapped skb. */
    rocker_desc_cookie_ptr_set(desc_info, skb);
    return 0;
}
0727 
/* Undo the DMA mapping recorded in a descriptor's RX_FRAG_* TLVs.
 * Silently does nothing if either TLV is absent (skb was never mapped).
 */
static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
                     const struct rocker_tlv **attrs)
{
    struct pci_dev *pdev = rocker->pdev;
    dma_addr_t dma_handle;
    size_t len;

    if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
        !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
        return;
    dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
    len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
    dma_unmap_single(&pdev->dev, dma_handle, len, DMA_FROM_DEVICE);
}
0742 
/* Free the skb attached to an rx descriptor (cookie), unmapping its
 * DMA buffer first. No-op when the descriptor holds no skb.
 */
static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
                    const struct rocker_desc_info *desc_info)
{
    const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
    struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

    if (!skb)
        return;
    rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
    rocker_dma_rx_ring_skb_unmap(rocker, attrs);
    dev_kfree_skb_any(skb);
}
0755 
0756 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
0757 {
0758     const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
0759     const struct rocker *rocker = rocker_port->rocker;
0760     int i;
0761     int err;
0762 
0763     for (i = 0; i < rx_ring->size; i++) {
0764         err = rocker_dma_rx_ring_skb_alloc(rocker_port,
0765                            &rx_ring->desc_info[i]);
0766         if (err)
0767             goto rollback;
0768     }
0769     return 0;
0770 
0771 rollback:
0772     for (i--; i >= 0; i--)
0773         rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
0774     return err;
0775 }
0776 
0777 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
0778 {
0779     const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
0780     const struct rocker *rocker = rocker_port->rocker;
0781     int i;
0782 
0783     for (i = 0; i < rx_ring->size; i++)
0784         rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
0785 }
0786 
/* Bring up a port's TX and RX DMA rings, including rx skbs, and hand
 * the rx ring to the hw. On error, unwinds in reverse order via the
 * goto ladder. Returns 0 or a negative errno.
 */
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
    struct rocker *rocker = rocker_port->rocker;
    int err;

    err = rocker_dma_ring_create(rocker,
                     ROCKER_DMA_TX(rocker_port->port_number),
                     ROCKER_DMA_TX_DEFAULT_SIZE,
                     &rocker_port->tx_ring);
    if (err) {
        netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
        return err;
    }

    err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
                     DMA_TO_DEVICE,
                     ROCKER_DMA_TX_DESC_SIZE);
    if (err) {
        netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
        goto err_dma_tx_ring_bufs_alloc;
    }

    err = rocker_dma_ring_create(rocker,
                     ROCKER_DMA_RX(rocker_port->port_number),
                     ROCKER_DMA_RX_DEFAULT_SIZE,
                     &rocker_port->rx_ring);
    if (err) {
        netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
        goto err_dma_rx_ring_create;
    }

    err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
                     DMA_BIDIRECTIONAL,
                     ROCKER_DMA_RX_DESC_SIZE);
    if (err) {
        netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
        goto err_dma_rx_ring_bufs_alloc;
    }

    err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
    if (err) {
        netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
        goto err_dma_rx_ring_skbs_alloc;
    }
    /* Hand the rx ring to the hw so it can start posting packets. */
    rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

    return 0;

err_dma_rx_ring_skbs_alloc:
    rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                  DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
    rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
    rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                  DMA_TO_DEVICE);
err_dma_tx_ring_bufs_alloc:
    rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
    return err;
}
0847 
/* Tear down a port's RX and TX DMA rings in reverse order of
 * rocker_port_dma_rings_init().
 */
static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
    struct rocker *rocker = rocker_port->rocker;

    rocker_dma_rx_ring_skbs_free(rocker_port);
    rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                  DMA_BIDIRECTIONAL);
    rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
    rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                  DMA_TO_DEVICE);
    rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
0860 
0861 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
0862                    bool enable)
0863 {
0864     u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
0865 
0866     if (enable)
0867         val |= 1ULL << rocker_port->pport;
0868     else
0869         val &= ~(1ULL << rocker_port->pport);
0870     rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
0871 }
0872 
0873 /********************************
0874  * Interrupt handler and helpers
0875  ********************************/
0876 
/* Command ring completion IRQ: reap every finished descriptor. For
 * fire-and-forget requests (wait->nowait) the descriptor is recycled
 * here; otherwise the sleeping submitter is woken. Consumed credits
 * are returned to the hw afterwards.
 */
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
    struct rocker *rocker = dev_id;
    const struct rocker_desc_info *desc_info;
    struct rocker_wait *wait;
    u32 credits = 0;

    spin_lock(&rocker->cmd_ring_lock);
    while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
        wait = rocker_desc_cookie_ptr_get(desc_info);
        if (wait->nowait) {
            rocker_desc_gen_clear(desc_info);
        } else {
            rocker_wait_wake_up(wait);
        }
        credits++;
    }
    spin_unlock(&rocker->cmd_ring_lock);
    rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

    return IRQ_HANDLED;
}
0899 
/* Propagate a hw link-up event to the net stack and log it. */
static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
    netif_carrier_on(rocker_port->dev);
    netdev_info(rocker_port->dev, "Link is up\n");
}
0905 
/* Propagate a hw link-down event to the net stack and log it. */
static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
    netif_carrier_off(rocker_port->dev);
    netdev_info(rocker_port->dev, "Link is down\n");
}
0911 
/* Handle a LINK_CHANGED event: parse the nested TLVs and update the
 * corresponding netdev's carrier state if it changed.
 * Returns 0, -EIO on malformed event, -EINVAL on bad port number.
 */
static int rocker_event_link_change(const struct rocker *rocker,
                    const struct rocker_tlv *info)
{
    const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
    unsigned int port_number;
    bool link_up;
    struct rocker_port *rocker_port;

    rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
    if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
        !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
        return -EIO;
    /* pport is 1-based; a bogus pport of 0 wraps to UINT_MAX and is
     * rejected by the range check below.
     */
    port_number =
        rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
    link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

    if (port_number >= rocker->port_count)
        return -EINVAL;

    rocker_port = rocker->ports[port_number];
    if (netif_carrier_ok(rocker_port->dev) != link_up) {
        if (link_up)
            rocker_port_link_up(rocker_port);
        else
            rocker_port_link_down(rocker_port);
    }

    return 0;
}
0941 
0942 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
0943                           const unsigned char *addr,
0944                           __be16 vlan_id);
0945 
0946 static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
0947                       const struct rocker_tlv *info)
0948 {
0949     const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
0950     unsigned int port_number;
0951     struct rocker_port *rocker_port;
0952     const unsigned char *addr;
0953     __be16 vlan_id;
0954 
0955     rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
0956     if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
0957         !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
0958         !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
0959         return -EIO;
0960     port_number =
0961         rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
0962     addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
0963     vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
0964 
0965     if (port_number >= rocker->port_count)
0966         return -EINVAL;
0967 
0968     rocker_port = rocker->ports[port_number];
0969     return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
0970 }
0971 
0972 static int rocker_event_process(const struct rocker *rocker,
0973                 const struct rocker_desc_info *desc_info)
0974 {
0975     const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
0976     const struct rocker_tlv *info;
0977     u16 type;
0978 
0979     rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
0980     if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
0981         !attrs[ROCKER_TLV_EVENT_INFO])
0982         return -EIO;
0983 
0984     type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
0985     info = attrs[ROCKER_TLV_EVENT_INFO];
0986 
0987     switch (type) {
0988     case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
0989         return rocker_event_link_change(rocker, info);
0990     case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
0991         return rocker_event_mac_vlan_seen(rocker, info);
0992     }
0993 
0994     return -EOPNOTSUPP;
0995 }
0996 
/* Interrupt handler for the event DMA ring (link changes, MAC/VLAN
 * learning).  Descriptor errors and decode failures are only logged; the
 * drain never stops early, every descriptor is reposted to the device and
 * the consumed credits are returned at the end.
 */
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
    struct rocker *rocker = dev_id;
    const struct pci_dev *pdev = rocker->pdev;
    const struct rocker_desc_info *desc_info;
    u32 credits = 0;
    int err;

    while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
        err = rocker_desc_err(desc_info);
        if (err) {
            dev_err(&pdev->dev, "event desc received with err %d\n",
                err);
        } else {
            err = rocker_event_process(rocker, desc_info);
            if (err)
                dev_err(&pdev->dev, "event processing failed with err %d\n",
                    err);
        }
        /* clear and repost the descriptor so the device can reuse it */
        rocker_desc_gen_clear(desc_info);
        rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
        credits++;
    }
    rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

    return IRQ_HANDLED;
}
1024 
1025 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1026 {
1027     struct rocker_port *rocker_port = dev_id;
1028 
1029     napi_schedule(&rocker_port->napi_tx);
1030     return IRQ_HANDLED;
1031 }
1032 
1033 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1034 {
1035     struct rocker_port *rocker_port = dev_id;
1036 
1037     napi_schedule(&rocker_port->napi_rx);
1038     return IRQ_HANDLED;
1039 }
1040 
1041 /********************
1042  * Command interface
1043  ********************/
1044 
/* rocker_cmd_exec - send one command descriptor to the device.
 * @rocker_port: port the command relates to
 * @nowait: when true the call is fire-and-forget; the completion IRQ
 *          recycles the descriptor and no response is decoded
 * @prepare: fills the descriptor's TLVs (required)
 * @process: optional; decodes the response TLVs on success
 *
 * Returns 0 on success, -EAGAIN when the command ring is full, -EIO when
 * the device does not complete within HZ/10 (100ms), or an error from
 * prepare()/process()/the device.
 */
int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait,
            rocker_cmd_prep_cb_t prepare, void *prepare_priv,
            rocker_cmd_proc_cb_t process, void *process_priv)
{
    struct rocker *rocker = rocker_port->rocker;
    struct rocker_desc_info *desc_info;
    struct rocker_wait *wait;
    unsigned long lock_flags;
    int err;

    spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

    desc_info = rocker_desc_head_get(&rocker->cmd_ring);
    if (!desc_info) {
        spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
        return -EAGAIN;
    }

    /* the wait object lives in the descriptor's cookie;
     * rocker_cmd_irq_handler() reads wait->nowait to decide whether to
     * wake us or recycle the descriptor itself
     */
    wait = rocker_desc_cookie_ptr_get(desc_info);
    rocker_wait_init(wait);
    wait->nowait = nowait;

    err = prepare(rocker_port, desc_info, prepare_priv);
    if (err) {
        spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
        return err;
    }

    /* hand the descriptor to the device */
    rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

    spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

    if (nowait)
        return 0;

    if (!rocker_wait_event_timeout(wait, HZ / 10))
        return -EIO;

    err = rocker_desc_err(desc_info);
    if (err)
        return err;

    if (process)
        err = process(rocker_port, desc_info, process_priv);

    /* release the descriptor slot for reuse */
    rocker_desc_gen_clear(desc_info);
    return err;
}
1093 
1094 static int
1095 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1096                   struct rocker_desc_info *desc_info,
1097                   void *priv)
1098 {
1099     struct rocker_tlv *cmd_info;
1100 
1101     if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1102                    ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1103         return -EMSGSIZE;
1104     cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1105     if (!cmd_info)
1106         return -EMSGSIZE;
1107     if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1108                    rocker_port->pport))
1109         return -EMSGSIZE;
1110     rocker_tlv_nest_end(desc_info, cmd_info);
1111     return 0;
1112 }
1113 
1114 static int
1115 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1116                       const struct rocker_desc_info *desc_info,
1117                       void *priv)
1118 {
1119     struct ethtool_link_ksettings *ecmd = priv;
1120     const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1121     const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1122     u32 speed;
1123     u8 duplex;
1124     u8 autoneg;
1125 
1126     rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1127     if (!attrs[ROCKER_TLV_CMD_INFO])
1128         return -EIO;
1129 
1130     rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1131                 attrs[ROCKER_TLV_CMD_INFO]);
1132     if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1133         !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1134         !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1135         return -EIO;
1136 
1137     speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1138     duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1139     autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1140 
1141     ethtool_link_ksettings_zero_link_mode(ecmd, supported);
1142     ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
1143 
1144     ecmd->base.phy_address = 0xff;
1145     ecmd->base.port = PORT_TP;
1146     ecmd->base.speed = speed;
1147     ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1148     ecmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1149 
1150     return 0;
1151 }
1152 
1153 static int
1154 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1155                       const struct rocker_desc_info *desc_info,
1156                       void *priv)
1157 {
1158     unsigned char *macaddr = priv;
1159     const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1160     const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1161     const struct rocker_tlv *attr;
1162 
1163     rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1164     if (!attrs[ROCKER_TLV_CMD_INFO])
1165         return -EIO;
1166 
1167     rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1168                 attrs[ROCKER_TLV_CMD_INFO]);
1169     attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1170     if (!attr)
1171         return -EIO;
1172 
1173     if (rocker_tlv_len(attr) != ETH_ALEN)
1174         return -EINVAL;
1175 
1176     ether_addr_copy(macaddr, rocker_tlv_data(attr));
1177     return 0;
1178 }
1179 
1180 static int
1181 rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1182                        const struct rocker_desc_info *desc_info,
1183                        void *priv)
1184 {
1185     u8 *p_mode = priv;
1186     const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1187     const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1188     const struct rocker_tlv *attr;
1189 
1190     rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1191     if (!attrs[ROCKER_TLV_CMD_INFO])
1192         return -EIO;
1193 
1194     rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1195                 attrs[ROCKER_TLV_CMD_INFO]);
1196     attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1197     if (!attr)
1198         return -EIO;
1199 
1200     *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
1201     return 0;
1202 }
1203 
/* Destination descriptor used when decoding a port's physical name. */
struct port_name {
    char *buf;   /* buffer receiving the NUL-terminated name */
    size_t len;  /* size of buf in bytes */
};
1208 
1209 static int
1210 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1211                         const struct rocker_desc_info *desc_info,
1212                         void *priv)
1213 {
1214     const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1215     const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1216     struct port_name *name = priv;
1217     const struct rocker_tlv *attr;
1218     size_t i, j, len;
1219     const char *str;
1220 
1221     rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1222     if (!attrs[ROCKER_TLV_CMD_INFO])
1223         return -EIO;
1224 
1225     rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1226                 attrs[ROCKER_TLV_CMD_INFO]);
1227     attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1228     if (!attr)
1229         return -EIO;
1230 
1231     len = min_t(size_t, rocker_tlv_len(attr), name->len);
1232     str = rocker_tlv_data(attr);
1233 
1234     /* make sure name only contains alphanumeric characters */
1235     for (i = j = 0; i < len; ++i) {
1236         if (isalnum(str[i])) {
1237             name->buf[j] = str[i];
1238             j++;
1239         }
1240     }
1241 
1242     if (j == 0)
1243         return -EIO;
1244 
1245     name->buf[j] = '\0';
1246 
1247     return 0;
1248 }
1249 
1250 static int
1251 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1252                       struct rocker_desc_info *desc_info,
1253                       void *priv)
1254 {
1255     struct ethtool_link_ksettings *ecmd = priv;
1256     struct rocker_tlv *cmd_info;
1257 
1258     if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1259                    ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1260         return -EMSGSIZE;
1261     cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1262     if (!cmd_info)
1263         return -EMSGSIZE;
1264     if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1265                    rocker_port->pport))
1266         return -EMSGSIZE;
1267     if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1268                    ecmd->base.speed))
1269         return -EMSGSIZE;
1270     if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1271                   ecmd->base.duplex))
1272         return -EMSGSIZE;
1273     if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1274                   ecmd->base.autoneg))
1275         return -EMSGSIZE;
1276     rocker_tlv_nest_end(desc_info, cmd_info);
1277     return 0;
1278 }
1279 
1280 static int
1281 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1282                       struct rocker_desc_info *desc_info,
1283                       void *priv)
1284 {
1285     const unsigned char *macaddr = priv;
1286     struct rocker_tlv *cmd_info;
1287 
1288     if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1289                    ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1290         return -EMSGSIZE;
1291     cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1292     if (!cmd_info)
1293         return -EMSGSIZE;
1294     if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1295                    rocker_port->pport))
1296         return -EMSGSIZE;
1297     if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1298                ETH_ALEN, macaddr))
1299         return -EMSGSIZE;
1300     rocker_tlv_nest_end(desc_info, cmd_info);
1301     return 0;
1302 }
1303 
1304 static int
1305 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1306                       struct rocker_desc_info *desc_info,
1307                       void *priv)
1308 {
1309     int mtu = *(int *)priv;
1310     struct rocker_tlv *cmd_info;
1311 
1312     if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1313                    ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1314         return -EMSGSIZE;
1315     cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1316     if (!cmd_info)
1317         return -EMSGSIZE;
1318     if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1319                    rocker_port->pport))
1320         return -EMSGSIZE;
1321     if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1322                    mtu))
1323         return -EMSGSIZE;
1324     rocker_tlv_nest_end(desc_info, cmd_info);
1325     return 0;
1326 }
1327 
1328 static int
1329 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1330                   struct rocker_desc_info *desc_info,
1331                   void *priv)
1332 {
1333     bool learning = *(bool *)priv;
1334     struct rocker_tlv *cmd_info;
1335 
1336     if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1337                    ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1338         return -EMSGSIZE;
1339     cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1340     if (!cmd_info)
1341         return -EMSGSIZE;
1342     if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1343                    rocker_port->pport))
1344         return -EMSGSIZE;
1345     if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1346                   learning))
1347         return -EMSGSIZE;
1348     rocker_tlv_nest_end(desc_info, cmd_info);
1349     return 0;
1350 }
1351 
1352 static int
1353 rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1354                      struct ethtool_link_ksettings *ecmd)
1355 {
1356     return rocker_cmd_exec(rocker_port, false,
1357                    rocker_cmd_get_port_settings_prep, NULL,
1358                    rocker_cmd_get_port_settings_ethtool_proc,
1359                    ecmd);
1360 }
1361 
1362 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1363                         unsigned char *macaddr)
1364 {
1365     return rocker_cmd_exec(rocker_port, false,
1366                    rocker_cmd_get_port_settings_prep, NULL,
1367                    rocker_cmd_get_port_settings_macaddr_proc,
1368                    macaddr);
1369 }
1370 
1371 static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1372                          u8 *p_mode)
1373 {
1374     return rocker_cmd_exec(rocker_port, false,
1375                    rocker_cmd_get_port_settings_prep, NULL,
1376                    rocker_cmd_get_port_settings_mode_proc, p_mode);
1377 }
1378 
1379 static int
1380 rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1381                      const struct ethtool_link_ksettings *ecmd)
1382 {
1383     struct ethtool_link_ksettings copy_ecmd;
1384 
1385     memcpy(&copy_ecmd, ecmd, sizeof(copy_ecmd));
1386 
1387     return rocker_cmd_exec(rocker_port, false,
1388                    rocker_cmd_set_port_settings_ethtool_prep,
1389                    &copy_ecmd, NULL, NULL);
1390 }
1391 
1392 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1393                         unsigned char *macaddr)
1394 {
1395     return rocker_cmd_exec(rocker_port, false,
1396                    rocker_cmd_set_port_settings_macaddr_prep,
1397                    macaddr, NULL, NULL);
1398 }
1399 
1400 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1401                         int mtu)
1402 {
1403     return rocker_cmd_exec(rocker_port, false,
1404                    rocker_cmd_set_port_settings_mtu_prep,
1405                    &mtu, NULL, NULL);
1406 }
1407 
1408 int rocker_port_set_learning(struct rocker_port *rocker_port,
1409                  bool learning)
1410 {
1411     return rocker_cmd_exec(rocker_port, false,
1412                    rocker_cmd_set_port_learning_prep,
1413                    &learning, NULL, NULL);
1414 }
1415 
1416 /**********************
1417  * Worlds manipulation
1418  **********************/
1419 
/* Registry of available worlds; currently only the OF-DPA world. */
static struct rocker_world_ops *rocker_world_ops[] = {
    &rocker_ofdpa_ops,
};

#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
1425 
1426 static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
1427 {
1428     int i;
1429 
1430     for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
1431         if (rocker_world_ops[i]->mode == mode)
1432             return rocker_world_ops[i];
1433     return NULL;
1434 }
1435 
1436 static int rocker_world_init(struct rocker *rocker, u8 mode)
1437 {
1438     struct rocker_world_ops *wops;
1439     int err;
1440 
1441     wops = rocker_world_ops_find(mode);
1442     if (!wops) {
1443         dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
1444             mode);
1445         return -EINVAL;
1446     }
1447     rocker->wops = wops;
1448     rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
1449     if (!rocker->wpriv)
1450         return -ENOMEM;
1451     if (!wops->init)
1452         return 0;
1453     err = wops->init(rocker);
1454     if (err)
1455         kfree(rocker->wpriv);
1456     return err;
1457 }
1458 
1459 static void rocker_world_fini(struct rocker *rocker)
1460 {
1461     struct rocker_world_ops *wops = rocker->wops;
1462 
1463     if (!wops || !wops->fini)
1464         return;
1465     wops->fini(rocker);
1466     kfree(rocker->wpriv);
1467 }
1468 
1469 static int rocker_world_check_init(struct rocker_port *rocker_port)
1470 {
1471     struct rocker *rocker = rocker_port->rocker;
1472     u8 mode;
1473     int err;
1474 
1475     err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
1476     if (err) {
1477         dev_err(&rocker->pdev->dev, "failed to get port mode\n");
1478         return err;
1479     }
1480     if (rocker->wops) {
1481         if (rocker->wops->mode != mode) {
1482             dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
1483             return -EINVAL;
1484         }
1485         return 0;
1486     }
1487     return rocker_world_init(rocker, mode);
1488 }
1489 
1490 static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
1491 {
1492     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1493     int err;
1494 
1495     rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
1496     if (!rocker_port->wpriv)
1497         return -ENOMEM;
1498     if (!wops->port_pre_init)
1499         return 0;
1500     err = wops->port_pre_init(rocker_port);
1501     if (err)
1502         kfree(rocker_port->wpriv);
1503     return 0;
1504 }
1505 
1506 static int rocker_world_port_init(struct rocker_port *rocker_port)
1507 {
1508     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1509 
1510     if (!wops->port_init)
1511         return 0;
1512     return wops->port_init(rocker_port);
1513 }
1514 
1515 static void rocker_world_port_fini(struct rocker_port *rocker_port)
1516 {
1517     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1518 
1519     if (!wops->port_fini)
1520         return;
1521     wops->port_fini(rocker_port);
1522 }
1523 
1524 static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
1525 {
1526     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1527 
1528     if (!wops->port_post_fini)
1529         return;
1530     wops->port_post_fini(rocker_port);
1531     kfree(rocker_port->wpriv);
1532 }
1533 
1534 static int rocker_world_port_open(struct rocker_port *rocker_port)
1535 {
1536     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1537 
1538     if (!wops->port_open)
1539         return 0;
1540     return wops->port_open(rocker_port);
1541 }
1542 
1543 static void rocker_world_port_stop(struct rocker_port *rocker_port)
1544 {
1545     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1546 
1547     if (!wops->port_stop)
1548         return;
1549     wops->port_stop(rocker_port);
1550 }
1551 
1552 static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
1553                         u8 state)
1554 {
1555     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1556 
1557     if (!wops->port_attr_stp_state_set)
1558         return -EOPNOTSUPP;
1559 
1560     return wops->port_attr_stp_state_set(rocker_port, state);
1561 }
1562 
1563 static int
1564 rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
1565                         rocker_port,
1566                         unsigned long *
1567                         p_brport_flags_support)
1568 {
1569     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1570 
1571     if (!wops->port_attr_bridge_flags_support_get)
1572         return -EOPNOTSUPP;
1573     return wops->port_attr_bridge_flags_support_get(rocker_port,
1574                             p_brport_flags_support);
1575 }
1576 
1577 static int
1578 rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
1579                         struct switchdev_brport_flags flags)
1580 {
1581     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1582     unsigned long brport_flags_s;
1583     int err;
1584 
1585     if (!wops->port_attr_bridge_flags_set)
1586         return -EOPNOTSUPP;
1587 
1588     err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
1589                                   &brport_flags_s);
1590     if (err)
1591         return err;
1592 
1593     if (flags.mask & ~brport_flags_s)
1594         return -EINVAL;
1595 
1596     return 0;
1597 }
1598 
1599 static int
1600 rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
1601                     struct switchdev_brport_flags flags)
1602 {
1603     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1604 
1605     if (!wops->port_attr_bridge_flags_set)
1606         return -EOPNOTSUPP;
1607 
1608     return wops->port_attr_bridge_flags_set(rocker_port, flags.val);
1609 }
1610 
1611 static int
1612 rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
1613                           u32 ageing_time)
1614 {
1615     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1616 
1617     if (!wops->port_attr_bridge_ageing_time_set)
1618         return -EOPNOTSUPP;
1619 
1620     return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time);
1621 }
1622 
1623 static int
1624 rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
1625                    const struct switchdev_obj_port_vlan *vlan)
1626 {
1627     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1628 
1629     if (!wops->port_obj_vlan_add)
1630         return -EOPNOTSUPP;
1631 
1632     return wops->port_obj_vlan_add(rocker_port, vlan);
1633 }
1634 
1635 static int
1636 rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
1637                    const struct switchdev_obj_port_vlan *vlan)
1638 {
1639     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1640 
1641     if (netif_is_bridge_master(vlan->obj.orig_dev))
1642         return -EOPNOTSUPP;
1643 
1644     if (!wops->port_obj_vlan_del)
1645         return -EOPNOTSUPP;
1646     return wops->port_obj_vlan_del(rocker_port, vlan);
1647 }
1648 
1649 static int
1650 rocker_world_port_fdb_add(struct rocker_port *rocker_port,
1651               struct switchdev_notifier_fdb_info *info)
1652 {
1653     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1654 
1655     if (!wops->port_obj_fdb_add)
1656         return -EOPNOTSUPP;
1657 
1658     return wops->port_obj_fdb_add(rocker_port, info->vid, info->addr);
1659 }
1660 
1661 static int
1662 rocker_world_port_fdb_del(struct rocker_port *rocker_port,
1663               struct switchdev_notifier_fdb_info *info)
1664 {
1665     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1666 
1667     if (!wops->port_obj_fdb_del)
1668         return -EOPNOTSUPP;
1669     return wops->port_obj_fdb_del(rocker_port, info->vid, info->addr);
1670 }
1671 
1672 static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
1673                        struct net_device *master,
1674                        struct netlink_ext_ack *extack)
1675 {
1676     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1677 
1678     if (!wops->port_master_linked)
1679         return -EOPNOTSUPP;
1680     return wops->port_master_linked(rocker_port, master, extack);
1681 }
1682 
1683 static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
1684                          struct net_device *master)
1685 {
1686     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1687 
1688     if (!wops->port_master_unlinked)
1689         return -EOPNOTSUPP;
1690     return wops->port_master_unlinked(rocker_port, master);
1691 }
1692 
1693 static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
1694                       struct neighbour *n)
1695 {
1696     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1697 
1698     if (!wops->port_neigh_update)
1699         return -EOPNOTSUPP;
1700     return wops->port_neigh_update(rocker_port, n);
1701 }
1702 
1703 static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
1704                        struct neighbour *n)
1705 {
1706     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1707 
1708     if (!wops->port_neigh_destroy)
1709         return -EOPNOTSUPP;
1710     return wops->port_neigh_destroy(rocker_port, n);
1711 }
1712 
1713 static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
1714                           const unsigned char *addr,
1715                           __be16 vlan_id)
1716 {
1717     struct rocker_world_ops *wops = rocker_port->rocker->wops;
1718 
1719     if (!wops->port_ev_mac_vlan_seen)
1720         return -EOPNOTSUPP;
1721     return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
1722 }
1723 
1724 static int rocker_world_fib4_add(struct rocker *rocker,
1725                  const struct fib_entry_notifier_info *fen_info)
1726 {
1727     struct rocker_world_ops *wops = rocker->wops;
1728 
1729     if (!wops->fib4_add)
1730         return 0;
1731     return wops->fib4_add(rocker, fen_info);
1732 }
1733 
1734 static int rocker_world_fib4_del(struct rocker *rocker,
1735                  const struct fib_entry_notifier_info *fen_info)
1736 {
1737     struct rocker_world_ops *wops = rocker->wops;
1738 
1739     if (!wops->fib4_del)
1740         return 0;
1741     return wops->fib4_del(rocker, fen_info);
1742 }
1743 
1744 static void rocker_world_fib4_abort(struct rocker *rocker)
1745 {
1746     struct rocker_world_ops *wops = rocker->wops;
1747 
1748     if (wops->fib4_abort)
1749         wops->fib4_abort(rocker);
1750 }
1751 
1752 /*****************
1753  * Net device ops
1754  *****************/
1755 
/* ndo_open: bring the port up.  Sets up the per-port DMA rings, attaches
 * the tx and rx MSI-X vectors, opens the port in the world and enables
 * NAPI.  The error labels unwind in exactly the reverse order of
 * acquisition.
 */
static int rocker_port_open(struct net_device *dev)
{
    struct rocker_port *rocker_port = netdev_priv(dev);
    int err;

    err = rocker_port_dma_rings_init(rocker_port);
    if (err)
        return err;

    err = request_irq(rocker_msix_tx_vector(rocker_port),
              rocker_tx_irq_handler, 0,
              rocker_driver_name, rocker_port);
    if (err) {
        netdev_err(rocker_port->dev, "cannot assign tx irq\n");
        goto err_request_tx_irq;
    }

    err = request_irq(rocker_msix_rx_vector(rocker_port),
              rocker_rx_irq_handler, 0,
              rocker_driver_name, rocker_port);
    if (err) {
        netdev_err(rocker_port->dev, "cannot assign rx irq\n");
        goto err_request_rx_irq;
    }

    err = rocker_world_port_open(rocker_port);
    if (err) {
        netdev_err(rocker_port->dev, "cannot open port in world\n");
        goto err_world_port_open;
    }

    napi_enable(&rocker_port->napi_tx);
    napi_enable(&rocker_port->napi_rx);
    /* honour IFLA_PROTO_DOWN: keep the hardware disabled if set */
    if (!dev->proto_down)
        rocker_port_set_enable(rocker_port, true);
    netif_start_queue(dev);
    return 0;

err_world_port_open:
    free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
    free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
    rocker_port_dma_rings_fini(rocker_port);
    return err;
}
1802 
/* ndo_stop: mirror image of rocker_port_open() — quiesce the tx queue and
 * hardware, stop NAPI, close the port in the world, then release the IRQs
 * and the DMA rings.
 */
static int rocker_port_stop(struct net_device *dev)
{
    struct rocker_port *rocker_port = netdev_priv(dev);

    netif_stop_queue(dev);
    rocker_port_set_enable(rocker_port, false);
    napi_disable(&rocker_port->napi_rx);
    napi_disable(&rocker_port->napi_tx);
    rocker_world_port_stop(rocker_port);
    free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
    free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
    rocker_port_dma_rings_fini(rocker_port);

    return 0;
}
1818 
/* Release the DMA mappings recorded in a tx descriptor by walking its
 * TX_FRAG TLVs and unmapping each (addr, len) pair.  Malformed fragment
 * TLVs are skipped rather than treated as fatal.
 */
static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
                       const struct rocker_desc_info *desc_info)
{
    const struct rocker *rocker = rocker_port->rocker;
    struct pci_dev *pdev = rocker->pdev;
    const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
    struct rocker_tlv *attr;
    int rem;

    rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
    if (!attrs[ROCKER_TLV_TX_FRAGS])
        return;
    rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
        const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
        dma_addr_t dma_handle;
        size_t len;

        if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
            continue;
        rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
                    attr);
        if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
            !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
            continue;
        dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
        len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
        dma_unmap_single(&pdev->dev, dma_handle, len, DMA_TO_DEVICE);
    }
}
1848 
/* DMA-map one tx buffer and record it as a nested ROCKER_TLV_TX_FRAG
 * TLV (ADDR + LEN) in @desc_info.
 *
 * Returns 0 on success, -EIO on DMA mapping failure, or -EMSGSIZE if
 * the descriptor ran out of space (in which case the partial nest is
 * cancelled and the mapping undone).
 */
static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
                       struct rocker_desc_info *desc_info,
                       char *buf, size_t buf_len)
{
    const struct rocker *rocker = rocker_port->rocker;
    struct pci_dev *pdev = rocker->pdev;
    dma_addr_t dma_handle;
    struct rocker_tlv *frag;

    dma_handle = dma_map_single(&pdev->dev, buf, buf_len, DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(&pdev->dev, dma_handle))) {
        if (net_ratelimit())
            netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
        return -EIO;
    }
    frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
    if (!frag)
        goto unmap_frag;
    if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
                   dma_handle))
        goto nest_cancel;
    if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
                   buf_len))
        goto nest_cancel;
    rocker_tlv_nest_end(desc_info, frag);
    return 0;

nest_cancel:
    rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
    dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_TO_DEVICE);
    return -EMSGSIZE;
}
1882 
1883 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
1884 {
1885     struct rocker_port *rocker_port = netdev_priv(dev);
1886     struct rocker *rocker = rocker_port->rocker;
1887     struct rocker_desc_info *desc_info;
1888     struct rocker_tlv *frags;
1889     int i;
1890     int err;
1891 
1892     desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
1893     if (unlikely(!desc_info)) {
1894         if (net_ratelimit())
1895             netdev_err(dev, "tx ring full when queue awake\n");
1896         return NETDEV_TX_BUSY;
1897     }
1898 
1899     rocker_desc_cookie_ptr_set(desc_info, skb);
1900 
1901     frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
1902     if (!frags)
1903         goto out;
1904     err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
1905                       skb->data, skb_headlen(skb));
1906     if (err)
1907         goto nest_cancel;
1908     if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
1909         err = skb_linearize(skb);
1910         if (err)
1911             goto unmap_frags;
1912     }
1913 
1914     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1915         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1916 
1917         err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
1918                           skb_frag_address(frag),
1919                           skb_frag_size(frag));
1920         if (err)
1921             goto unmap_frags;
1922     }
1923     rocker_tlv_nest_end(desc_info, frags);
1924 
1925     rocker_desc_gen_clear(desc_info);
1926     rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
1927 
1928     desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
1929     if (!desc_info)
1930         netif_stop_queue(dev);
1931 
1932     return NETDEV_TX_OK;
1933 
1934 unmap_frags:
1935     rocker_tx_desc_frags_unmap(rocker_port, desc_info);
1936 nest_cancel:
1937     rocker_tlv_nest_cancel(desc_info, frags);
1938 out:
1939     dev_kfree_skb(skb);
1940     dev->stats.tx_dropped++;
1941 
1942     return NETDEV_TX_OK;
1943 }
1944 
1945 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
1946 {
1947     struct sockaddr *addr = p;
1948     struct rocker_port *rocker_port = netdev_priv(dev);
1949     int err;
1950 
1951     if (!is_valid_ether_addr(addr->sa_data))
1952         return -EADDRNOTAVAIL;
1953 
1954     err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
1955     if (err)
1956         return err;
1957     eth_hw_addr_set(dev, addr->sa_data);
1958     return 0;
1959 }
1960 
1961 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
1962 {
1963     struct rocker_port *rocker_port = netdev_priv(dev);
1964     int running = netif_running(dev);
1965     int err;
1966 
1967     if (running)
1968         rocker_port_stop(dev);
1969 
1970     netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
1971     dev->mtu = new_mtu;
1972 
1973     err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
1974     if (err)
1975         return err;
1976 
1977     if (running)
1978         err = rocker_port_open(dev);
1979 
1980     return err;
1981 }
1982 
1983 static int rocker_port_get_phys_port_name(struct net_device *dev,
1984                       char *buf, size_t len)
1985 {
1986     struct rocker_port *rocker_port = netdev_priv(dev);
1987     struct port_name name = { .buf = buf, .len = len };
1988     int err;
1989 
1990     err = rocker_cmd_exec(rocker_port, false,
1991                   rocker_cmd_get_port_settings_prep, NULL,
1992                   rocker_cmd_get_port_settings_phys_name_proc,
1993                   &name);
1994 
1995     return err ? -EOPNOTSUPP : 0;
1996 }
1997 
1998 static void rocker_port_neigh_destroy(struct net_device *dev,
1999                       struct neighbour *n)
2000 {
2001     struct rocker_port *rocker_port = netdev_priv(n->dev);
2002     int err;
2003 
2004     err = rocker_world_port_neigh_destroy(rocker_port, n);
2005     if (err)
2006         netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
2007                 err);
2008 }
2009 
2010 static int rocker_port_get_port_parent_id(struct net_device *dev,
2011                       struct netdev_phys_item_id *ppid)
2012 {
2013     const struct rocker_port *rocker_port = netdev_priv(dev);
2014     const struct rocker *rocker = rocker_port->rocker;
2015 
2016     ppid->id_len = sizeof(rocker->hw.id);
2017     memcpy(&ppid->id, &rocker->hw.id, ppid->id_len);
2018 
2019     return 0;
2020 }
2021 
/* netdev callbacks for a rocker front-panel port. */
static const struct net_device_ops rocker_port_netdev_ops = {
    .ndo_open           = rocker_port_open,
    .ndo_stop           = rocker_port_stop,
    .ndo_start_xmit         = rocker_port_xmit,
    .ndo_set_mac_address        = rocker_port_set_mac_address,
    .ndo_change_mtu         = rocker_port_change_mtu,
    .ndo_get_phys_port_name     = rocker_port_get_phys_port_name,
    .ndo_neigh_destroy      = rocker_port_neigh_destroy,
    .ndo_get_port_parent_id     = rocker_port_get_port_parent_id,
};
2032 
2033 /********************
2034  * swdev interface
2035  ********************/
2036 
2037 static int rocker_port_attr_set(struct net_device *dev,
2038                 const struct switchdev_attr *attr)
2039 {
2040     struct rocker_port *rocker_port = netdev_priv(dev);
2041     int err = 0;
2042 
2043     switch (attr->id) {
2044     case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
2045         err = rocker_world_port_attr_stp_state_set(rocker_port,
2046                                attr->u.stp_state);
2047         break;
2048     case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
2049         err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
2050                                   attr->u.brport_flags);
2051         break;
2052     case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
2053         err = rocker_world_port_attr_bridge_flags_set(rocker_port,
2054                                   attr->u.brport_flags);
2055         break;
2056     case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
2057         err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
2058                                     attr->u.ageing_time);
2059         break;
2060     default:
2061         err = -EOPNOTSUPP;
2062         break;
2063     }
2064 
2065     return err;
2066 }
2067 
2068 static int rocker_port_obj_add(struct net_device *dev,
2069                    const struct switchdev_obj *obj)
2070 {
2071     struct rocker_port *rocker_port = netdev_priv(dev);
2072     int err = 0;
2073 
2074     switch (obj->id) {
2075     case SWITCHDEV_OBJ_ID_PORT_VLAN:
2076         err = rocker_world_port_obj_vlan_add(rocker_port,
2077                              SWITCHDEV_OBJ_PORT_VLAN(obj));
2078         break;
2079     default:
2080         err = -EOPNOTSUPP;
2081         break;
2082     }
2083 
2084     return err;
2085 }
2086 
2087 static int rocker_port_obj_del(struct net_device *dev,
2088                    const struct switchdev_obj *obj)
2089 {
2090     struct rocker_port *rocker_port = netdev_priv(dev);
2091     int err = 0;
2092 
2093     switch (obj->id) {
2094     case SWITCHDEV_OBJ_ID_PORT_VLAN:
2095         err = rocker_world_port_obj_vlan_del(rocker_port,
2096                              SWITCHDEV_OBJ_PORT_VLAN(obj));
2097         break;
2098     default:
2099         err = -EOPNOTSUPP;
2100         break;
2101     }
2102 
2103     return err;
2104 }
2105 
/* Deferred FIB notification.  Exactly one union member is valid,
 * selected by @event; the work handler drops the fib_info/fib_rule
 * reference taken when the work was queued and frees this struct.
 */
struct rocker_fib_event_work {
    struct work_struct work;
    union {
        struct fib_entry_notifier_info fen_info;
        struct fib_rule_notifier_info fr_info;
    };
    struct rocker *rocker;
    unsigned long event;
};
2115 
/* Process-context handler for FIB events queued by
 * rocker_router_fib_event().  Takes rtnl_lock, applies the route/rule
 * change to the world driver, releases the reference taken at queue
 * time and frees the work item.
 */
static void rocker_router_fib_event_work(struct work_struct *work)
{
    struct rocker_fib_event_work *fib_work =
        container_of(work, struct rocker_fib_event_work, work);
    struct rocker *rocker = fib_work->rocker;
    struct fib_rule *rule;
    int err;

    /* Protect internal structures from changes */
    rtnl_lock();
    switch (fib_work->event) {
    case FIB_EVENT_ENTRY_REPLACE:
        err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
        if (err)
            rocker_world_fib4_abort(rocker);
        fib_info_put(fib_work->fen_info.fi);
        break;
    case FIB_EVENT_ENTRY_DEL:
        rocker_world_fib4_del(rocker, &fib_work->fen_info);
        fib_info_put(fib_work->fen_info.fi);
        break;
    case FIB_EVENT_RULE_ADD:
    case FIB_EVENT_RULE_DEL:
        /* A non-default rule means policy routing the device cannot
         * honor, so all fib4 offloads are aborted.
         */
        rule = fib_work->fr_info.rule;
        if (!fib4_rule_default(rule))
            rocker_world_fib4_abort(rocker);
        fib_rule_put(rule);
        break;
    }
    rtnl_unlock();
    kfree(fib_work);
}
2148 
2149 /* Called with rcu_read_lock() */
2150 static int rocker_router_fib_event(struct notifier_block *nb,
2151                    unsigned long event, void *ptr)
2152 {
2153     struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
2154     struct rocker_fib_event_work *fib_work;
2155     struct fib_notifier_info *info = ptr;
2156 
2157     if (info->family != AF_INET)
2158         return NOTIFY_DONE;
2159 
2160     fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
2161     if (WARN_ON(!fib_work))
2162         return NOTIFY_BAD;
2163 
2164     INIT_WORK(&fib_work->work, rocker_router_fib_event_work);
2165     fib_work->rocker = rocker;
2166     fib_work->event = event;
2167 
2168     switch (event) {
2169     case FIB_EVENT_ENTRY_REPLACE:
2170     case FIB_EVENT_ENTRY_DEL:
2171         if (info->family == AF_INET) {
2172             struct fib_entry_notifier_info *fen_info = ptr;
2173 
2174             if (fen_info->fi->fib_nh_is_v6) {
2175                 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
2176                 kfree(fib_work);
2177                 return notifier_from_errno(-EINVAL);
2178             }
2179             if (fen_info->fi->nh) {
2180                 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
2181                 kfree(fib_work);
2182                 return notifier_from_errno(-EINVAL);
2183             }
2184         }
2185 
2186         memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
2187         /* Take referece on fib_info to prevent it from being
2188          * freed while work is queued. Release it afterwards.
2189          */
2190         fib_info_hold(fib_work->fen_info.fi);
2191         break;
2192     case FIB_EVENT_RULE_ADD:
2193     case FIB_EVENT_RULE_DEL:
2194         memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
2195         fib_rule_get(fib_work->fr_info.rule);
2196         break;
2197     }
2198 
2199     queue_work(rocker->rocker_owq, &fib_work->work);
2200 
2201     return NOTIFY_DONE;
2202 }
2203 
2204 /********************
2205  * ethtool interface
2206  ********************/
2207 
/* ethtool get_link_ksettings: read current settings from the device. */
static int
rocker_port_get_link_ksettings(struct net_device *dev,
                   struct ethtool_link_ksettings *ecmd)
{
    return rocker_cmd_get_port_settings_ethtool(netdev_priv(dev), ecmd);
}
2216 
/* ethtool set_link_ksettings: push new settings to the device. */
static int
rocker_port_set_link_ksettings(struct net_device *dev,
                   const struct ethtool_link_ksettings *ecmd)
{
    return rocker_cmd_set_port_settings_ethtool(netdev_priv(dev), ecmd);
}
2225 
2226 static void rocker_port_get_drvinfo(struct net_device *dev,
2227                     struct ethtool_drvinfo *drvinfo)
2228 {
2229     strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
2230     strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2231 }
2232 
/* ethtool stat name -> device TLV stat type, in the order the values
 * are reported to userspace (must match rocker_port_get_strings() and
 * rocker_cmd_get_port_stats_ethtool_proc()).
 */
static struct rocker_port_stats {
    char str[ETH_GSTRING_LEN];
    int type;
} rocker_port_stats[] = {
    { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
    { "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
    { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
    { "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

    { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
    { "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
    { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
    { "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};
2247 
2248 #define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
2249 
2250 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
2251                     u8 *data)
2252 {
2253     u8 *p = data;
2254     int i;
2255 
2256     switch (stringset) {
2257     case ETH_SS_STATS:
2258         for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
2259             memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
2260             p += ETH_GSTRING_LEN;
2261         }
2262         break;
2263     }
2264 }
2265 
/* Build a GET_PORT_STATS command descriptor asking for the counters of
 * this port's pport.  Returns -EMSGSIZE if the descriptor buffer runs
 * out of room.
 */
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
                   struct rocker_desc_info *desc_info,
                   void *priv)
{
    struct rocker_tlv *cmd_stats;

    if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                   ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
        return -EMSGSIZE;

    cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
    if (!cmd_stats)
        return -EMSGSIZE;

    if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
                   rocker_port->pport))
        return -EMSGSIZE;

    rocker_tlv_nest_end(desc_info, cmd_stats);

    return 0;
}
2289 
/* Parse a GET_PORT_STATS reply descriptor into the u64 array @priv,
 * indexed in rocker_port_stats[] order.  Entries the device did not
 * report are left untouched.  Returns -EIO on a malformed reply or a
 * pport mismatch.
 */
static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
                       const struct rocker_desc_info *desc_info,
                       void *priv)
{
    const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
    const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
    const struct rocker_tlv *pattr;
    u32 pport;
    u64 *data = priv;
    int i;

    rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

    if (!attrs[ROCKER_TLV_CMD_INFO])
        return -EIO;

    rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
                attrs[ROCKER_TLV_CMD_INFO]);

    if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
        return -EIO;

    /* Sanity check: the reply must be for the port we asked about. */
    pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
    if (pport != rocker_port->pport)
        return -EIO;

    for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
        pattr = stats_attrs[rocker_port_stats[i].type];
        if (!pattr)
            continue;

        data[i] = rocker_tlv_get_u64(pattr);
    }

    return 0;
}
2327 
2328 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
2329                          void *priv)
2330 {
2331     return rocker_cmd_exec(rocker_port, false,
2332                    rocker_cmd_get_port_stats_prep, NULL,
2333                    rocker_cmd_get_port_stats_ethtool_proc,
2334                    priv);
2335 }
2336 
2337 static void rocker_port_get_stats(struct net_device *dev,
2338                   struct ethtool_stats *stats, u64 *data)
2339 {
2340     struct rocker_port *rocker_port = netdev_priv(dev);
2341 
2342     if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
2343         int i;
2344 
2345         for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
2346             data[i] = 0;
2347     }
2348 }
2349 
2350 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
2351 {
2352     switch (sset) {
2353     case ETH_SS_STATS:
2354         return ROCKER_PORT_STATS_LEN;
2355     default:
2356         return -EOPNOTSUPP;
2357     }
2358 }
2359 
/* ethtool callbacks for a rocker front-panel port. */
static const struct ethtool_ops rocker_port_ethtool_ops = {
    .get_drvinfo        = rocker_port_get_drvinfo,
    .get_link       = ethtool_op_get_link,
    .get_strings        = rocker_port_get_strings,
    .get_ethtool_stats  = rocker_port_get_stats,
    .get_sset_count     = rocker_port_get_sset_count,
    .get_link_ksettings = rocker_port_get_link_ksettings,
    .set_link_ksettings = rocker_port_set_link_ksettings,
};
2369 
2370 /*****************
2371  * NAPI interface
2372  *****************/
2373 
/* Map a tx NAPI context back to its owning rocker_port. */
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
    return container_of(napi, struct rocker_port, napi_tx);
}
2378 
/* NAPI tx completion poll: reclaim completed tx descriptors, unmap
 * their fragments, account stats, free the skbs and hand the credits
 * back to the hardware.  The tx ring is drained fully regardless of
 * @budget and the poll always reports 0 work done.
 */
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
    struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
    const struct rocker *rocker = rocker_port->rocker;
    const struct rocker_desc_info *desc_info;
    u32 credits = 0;
    int err;

    /* Cleanup tx descriptors */
    while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
        struct sk_buff *skb;

        err = rocker_desc_err(desc_info);
        if (err && net_ratelimit())
            netdev_err(rocker_port->dev, "tx desc received with err %d\n",
                   err);
        rocker_tx_desc_frags_unmap(rocker_port, desc_info);

        skb = rocker_desc_cookie_ptr_get(desc_info);
        if (err == 0) {
            rocker_port->dev->stats.tx_packets++;
            rocker_port->dev->stats.tx_bytes += skb->len;
        } else {
            rocker_port->dev->stats.tx_errors++;
        }

        dev_kfree_skb_any(skb);
        credits++;
    }

    /* Ring space freed up; rearm a queue stopped by rocker_port_xmit(). */
    if (credits && netif_queue_stopped(rocker_port->dev))
        netif_wake_queue(rocker_port->dev);

    napi_complete(napi);
    rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

    return 0;
}
2417 
/* Process one completed rx descriptor: unmap its skb, set the frame
 * length and protocol, propagate the hardware forward-offload flag,
 * deliver the skb to the stack and attach a freshly allocated skb to
 * the descriptor for reuse.  Returns 0 or a negative errno; the caller
 * recycles the descriptor either way.
 */
static int rocker_port_rx_proc(const struct rocker *rocker,
                   const struct rocker_port *rocker_port,
                   struct rocker_desc_info *desc_info)
{
    const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
    struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
    size_t rx_len;
    u16 rx_flags = 0;

    if (!skb)
        return -ENOENT;

    rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
    if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
        return -EINVAL;
    if (attrs[ROCKER_TLV_RX_FLAGS])
        rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

    rocker_dma_rx_ring_skb_unmap(rocker, attrs);

    rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
    skb_put(skb, rx_len);
    skb->protocol = eth_type_trans(skb, rocker_port->dev);

    /* Device already forwarded this frame; tell the bridge not to
     * forward it again in software.
     */
    if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
        skb->offload_fwd_mark = 1;

    rocker_port->dev->stats.rx_packets++;
    rocker_port->dev->stats.rx_bytes += skb->len;

    netif_receive_skb(skb);

    return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
2452 
/* Map an rx NAPI context back to its owning rocker_port. */
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
    return container_of(napi, struct rocker_port, napi_rx);
}
2457 
/* NAPI rx poll: process up to @budget completed rx descriptors,
 * deliver their skbs via rocker_port_rx_proc() and recycle the
 * descriptors.  NAPI is completed only when the ring drained below
 * budget; returns the number of descriptors processed.
 */
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
    struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
    const struct rocker *rocker = rocker_port->rocker;
    struct rocker_desc_info *desc_info;
    u32 credits = 0;
    int err;

    /* Process rx descriptors */
    while (credits < budget &&
           (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
        err = rocker_desc_err(desc_info);
        if (err) {
            if (net_ratelimit())
                netdev_err(rocker_port->dev, "rx desc received with err %d\n",
                       err);
        } else {
            err = rocker_port_rx_proc(rocker, rocker_port,
                          desc_info);
            if (err && net_ratelimit())
                netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
                       err);
        }
        if (err)
            rocker_port->dev->stats.rx_errors++;

        /* Hand the descriptor back to the device regardless of
         * whether processing succeeded.
         */
        rocker_desc_gen_clear(desc_info);
        rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
        credits++;
    }

    if (credits < budget)
        napi_complete_done(napi, credits);

    rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

    return credits;
}
2496 
2497 /*****************
2498  * PCI driver ops
2499  *****************/
2500 
2501 static void rocker_carrier_init(const struct rocker_port *rocker_port)
2502 {
2503     const struct rocker *rocker = rocker_port->rocker;
2504     u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
2505     bool link_up;
2506 
2507     link_up = link_status & (1 << rocker_port->pport);
2508     if (link_up)
2509         netif_carrier_on(rocker_port->dev);
2510     else
2511         netif_carrier_off(rocker_port->dev);
2512 }
2513 
/* Tear down every probed port (world fini, unregister, free netdev),
 * then the world state and the ports array itself.  NULL slots are
 * skipped, so this is safe to call after a partially failed
 * rocker_probe_ports().
 */
static void rocker_remove_ports(struct rocker *rocker)
{
    struct rocker_port *rocker_port;
    int i;

    for (i = 0; i < rocker->port_count; i++) {
        rocker_port = rocker->ports[i];
        if (!rocker_port)
            continue;
        rocker_world_port_fini(rocker_port);
        unregister_netdev(rocker_port->dev);
        rocker_world_port_post_fini(rocker_port);
        free_netdev(rocker_port->dev);
    }
    rocker_world_fini(rocker);
    kfree(rocker->ports);
}
2531 
2532 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
2533 {
2534     const struct rocker *rocker = rocker_port->rocker;
2535     const struct pci_dev *pdev = rocker->pdev;
2536     u8 addr[ETH_ALEN];
2537     int err;
2538 
2539     err = rocker_cmd_get_port_settings_macaddr(rocker_port, addr);
2540     if (!err) {
2541         eth_hw_addr_set(rocker_port->dev, addr);
2542     } else {
2543         dev_warn(&pdev->dev, "failed to get mac address, using random\n");
2544         eth_hw_addr_random(rocker_port->dev);
2545     }
2546 }
2547 
2548 #define ROCKER_PORT_MIN_MTU ETH_MIN_MTU
2549 #define ROCKER_PORT_MAX_MTU 9000
/* Allocate, initialize and register the netdev for front-panel port
 * @port_number (hardware pport = port_number + 1), wiring up its world
 * state.  On failure, everything set up so far is unwound and a
 * negative errno returned.
 */
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
    struct pci_dev *pdev = rocker->pdev;
    struct rocker_port *rocker_port;
    struct net_device *dev;
    int err;

    dev = alloc_etherdev(sizeof(struct rocker_port));
    if (!dev)
        return -ENOMEM;
    SET_NETDEV_DEV(dev, &pdev->dev);
    rocker_port = netdev_priv(dev);
    rocker_port->dev = dev;
    rocker_port->rocker = rocker;
    rocker_port->port_number = port_number;
    rocker_port->pport = port_number + 1;

    err = rocker_world_check_init(rocker_port);
    if (err) {
        dev_err(&pdev->dev, "world init failed\n");
        goto err_world_check_init;
    }

    rocker_port_dev_addr_init(rocker_port);
    dev->netdev_ops = &rocker_port_netdev_ops;
    dev->ethtool_ops = &rocker_port_ethtool_ops;
    netif_napi_add_tx(dev, &rocker_port->napi_tx, rocker_port_poll_tx);
    netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
               NAPI_POLL_WEIGHT);
    rocker_carrier_init(rocker_port);

    dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

    /* MTU range: 68 - 9000 */
    dev->min_mtu = ROCKER_PORT_MIN_MTU;
    dev->max_mtu = ROCKER_PORT_MAX_MTU;

    err = rocker_world_port_pre_init(rocker_port);
    if (err) {
        dev_err(&pdev->dev, "port world pre-init failed\n");
        goto err_world_port_pre_init;
    }
    err = register_netdev(dev);
    if (err) {
        dev_err(&pdev->dev, "register_netdev failed\n");
        goto err_register_netdev;
    }
    /* Publish before world init so rocker_remove_ports() can find it. */
    rocker->ports[port_number] = rocker_port;

    err = rocker_world_port_init(rocker_port);
    if (err) {
        dev_err(&pdev->dev, "port world init failed\n");
        goto err_world_port_init;
    }

    return 0;

err_world_port_init:
    rocker->ports[port_number] = NULL;
    unregister_netdev(dev);
err_register_netdev:
    rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
    free_netdev(dev);
    return err;
}
2617 
2618 static int rocker_probe_ports(struct rocker *rocker)
2619 {
2620     int i;
2621     size_t alloc_size;
2622     int err;
2623 
2624     alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
2625     rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
2626     if (!rocker->ports)
2627         return -ENOMEM;
2628     for (i = 0; i < rocker->port_count; i++) {
2629         err = rocker_probe_port(rocker, i);
2630         if (err)
2631             goto remove_ports;
2632     }
2633     return 0;
2634 
2635 remove_ports:
2636     rocker_remove_ports(rocker);
2637     return err;
2638 }
2639 
/* Enable MSI-X: verify the device exposes exactly the vector count
 * the driver expects (ROCKER_MSIX_VEC_COUNT for this port count),
 * allocate and number the entry array, and enable the vectors.
 */
static int rocker_msix_init(struct rocker *rocker)
{
    struct pci_dev *pdev = rocker->pdev;
    int msix_entries;
    int i;
    int err;

    msix_entries = pci_msix_vec_count(pdev);
    if (msix_entries < 0)
        return msix_entries;

    if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
        return -EINVAL;

    rocker->msix_entries = kmalloc_array(msix_entries,
                         sizeof(struct msix_entry),
                         GFP_KERNEL);
    if (!rocker->msix_entries)
        return -ENOMEM;

    for (i = 0; i < msix_entries; i++)
        rocker->msix_entries[i].entry = i;

    /* _exact: all-or-nothing; partial vector allocation is useless. */
    err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
    if (err < 0)
        goto err_enable_msix;

    return 0;

err_enable_msix:
    kfree(rocker->msix_entries);
    return err;
}
2673 
/* Undo rocker_msix_init(): disable MSI-X and free the entry array. */
static void rocker_msix_fini(const struct rocker *rocker)
{
    pci_disable_msix(rocker->pdev);
    kfree(rocker->msix_entries);
}
2679 
/* A netdev belongs to this driver iff it uses rocker's netdev ops. */
static bool rocker_port_dev_check(const struct net_device *dev)
{
    return dev->netdev_ops == &rocker_port_netdev_ops;
}
2684 
2685 static int
2686 rocker_switchdev_port_attr_set_event(struct net_device *netdev,
2687         struct switchdev_notifier_port_attr_info *port_attr_info)
2688 {
2689     int err;
2690 
2691     err = rocker_port_attr_set(netdev, port_attr_info->attr);
2692 
2693     port_attr_info->handled = true;
2694     return notifier_from_errno(err);
2695 }
2696 
/* Deferred FDB add/del event.  fdb_info.addr points at an owned copy
 * of the MAC; the work handler frees it, frees this struct and drops
 * the netdev reference taken when the work was queued.
 */
struct rocker_switchdev_event_work {
    struct work_struct work;
    struct switchdev_notifier_fdb_info fdb_info;
    struct rocker_port *rocker_port;
    unsigned long event;
};
2703 
2704 static void
2705 rocker_fdb_offload_notify(struct rocker_port *rocker_port,
2706               struct switchdev_notifier_fdb_info *recv_info)
2707 {
2708     struct switchdev_notifier_fdb_info info = {};
2709 
2710     info.addr = recv_info->addr;
2711     info.vid = recv_info->vid;
2712     info.offloaded = true;
2713     call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
2714                  rocker_port->dev, &info.info, NULL);
2715 }
2716 
2717 static void rocker_switchdev_event_work(struct work_struct *work)
2718 {
2719     struct rocker_switchdev_event_work *switchdev_work =
2720         container_of(work, struct rocker_switchdev_event_work, work);
2721     struct rocker_port *rocker_port = switchdev_work->rocker_port;
2722     struct switchdev_notifier_fdb_info *fdb_info;
2723     int err;
2724 
2725     rtnl_lock();
2726     switch (switchdev_work->event) {
2727     case SWITCHDEV_FDB_ADD_TO_DEVICE:
2728         fdb_info = &switchdev_work->fdb_info;
2729         if (!fdb_info->added_by_user || fdb_info->is_local)
2730             break;
2731         err = rocker_world_port_fdb_add(rocker_port, fdb_info);
2732         if (err) {
2733             netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
2734             break;
2735         }
2736         rocker_fdb_offload_notify(rocker_port, fdb_info);
2737         break;
2738     case SWITCHDEV_FDB_DEL_TO_DEVICE:
2739         fdb_info = &switchdev_work->fdb_info;
2740         if (!fdb_info->added_by_user || fdb_info->is_local)
2741             break;
2742         err = rocker_world_port_fdb_del(rocker_port, fdb_info);
2743         if (err)
2744             netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
2745         break;
2746     }
2747     rtnl_unlock();
2748 
2749     kfree(switchdev_work->fdb_info.addr);
2750     kfree(switchdev_work);
2751     dev_put(rocker_port->dev);
2752 }
2753 
/* Switchdev notifier callback: attr-set events are handled inline;
 * FDB add/del events are snapshotted (including a private copy of the
 * MAC and a reference on the netdev) and deferred to process context,
 * since this runs under rcu_read_lock() and may not sleep.
 *
 * called under rcu_read_lock()
 */
static int rocker_switchdev_event(struct notifier_block *unused,
                  unsigned long event, void *ptr)
{
    struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
    struct rocker_switchdev_event_work *switchdev_work;
    struct switchdev_notifier_fdb_info *fdb_info = ptr;
    struct rocker_port *rocker_port;

    if (!rocker_port_dev_check(dev))
        return NOTIFY_DONE;

    if (event == SWITCHDEV_PORT_ATTR_SET)
        return rocker_switchdev_port_attr_set_event(dev, ptr);

    rocker_port = netdev_priv(dev);
    switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
    if (WARN_ON(!switchdev_work))
        return NOTIFY_BAD;

    INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work);
    switchdev_work->rocker_port = rocker_port;
    switchdev_work->event = event;

    switch (event) {
    case SWITCHDEV_FDB_ADD_TO_DEVICE:
    case SWITCHDEV_FDB_DEL_TO_DEVICE:
        memcpy(&switchdev_work->fdb_info, ptr,
               sizeof(switchdev_work->fdb_info));
        /* The notifier's addr lives on the caller's stack/heap; copy
         * it so the deferred work sees a stable address.
         */
        switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
        if (unlikely(!switchdev_work->fdb_info.addr)) {
            kfree(switchdev_work);
            return NOTIFY_BAD;
        }

        ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
                fdb_info->addr);
        /* Take a reference on the rocker device */
        dev_hold(dev);
        break;
    default:
        kfree(switchdev_work);
        return NOTIFY_DONE;
    }

    queue_work(rocker_port->rocker->rocker_owq,
           &switchdev_work->work);
    return NOTIFY_DONE;
}
2803 
2804 static int
2805 rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
2806             struct switchdev_notifier_port_obj_info *port_obj_info)
2807 {
2808     int err = -EOPNOTSUPP;
2809 
2810     switch (event) {
2811     case SWITCHDEV_PORT_OBJ_ADD:
2812         err = rocker_port_obj_add(netdev, port_obj_info->obj);
2813         break;
2814     case SWITCHDEV_PORT_OBJ_DEL:
2815         err = rocker_port_obj_del(netdev, port_obj_info->obj);
2816         break;
2817     }
2818 
2819     port_obj_info->handled = true;
2820     return notifier_from_errno(err);
2821 }
2822 
2823 static int rocker_switchdev_blocking_event(struct notifier_block *unused,
2824                        unsigned long event, void *ptr)
2825 {
2826     struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2827 
2828     if (!rocker_port_dev_check(dev))
2829         return NOTIFY_DONE;
2830 
2831     switch (event) {
2832     case SWITCHDEV_PORT_OBJ_ADD:
2833     case SWITCHDEV_PORT_OBJ_DEL:
2834         return rocker_switchdev_port_obj_event(event, dev, ptr);
2835     case SWITCHDEV_PORT_ATTR_SET:
2836         return rocker_switchdev_port_attr_set_event(dev, ptr);
2837     }
2838 
2839     return NOTIFY_DONE;
2840 }
2841 
/* Atomic switchdev notifier (FDB add/del, attr set); see
 * rocker_switchdev_event().
 */
static struct notifier_block rocker_switchdev_notifier = {
    .notifier_call = rocker_switchdev_event,
};
2845 
/* Blocking switchdev notifier (port obj add/del, attr set); see
 * rocker_switchdev_blocking_event().
 */
static struct notifier_block rocker_switchdev_blocking_notifier = {
    .notifier_call = rocker_switchdev_blocking_event,
};
2849 
/* PCI probe: allocate and bring up one rocker switch instance.
 *
 * Bring-up order: PCI enable/regions, 64-bit DMA mask, BAR0 mapping,
 * MSI-X, hardware self-test, device reset, DMA rings, cmd/event IRQs,
 * ordered workqueue, port netdevs, FIB notifier, switchdev notifiers.
 * On any failure the goto ladder unwinds in exact reverse order.
 */
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct notifier_block *nb;
    struct rocker *rocker;
    int err;

    rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
    if (!rocker)
        return -ENOMEM;

    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "pci_enable_device failed\n");
        goto err_pci_enable_device;
    }

    err = pci_request_regions(pdev, rocker_driver_name);
    if (err) {
        dev_err(&pdev->dev, "pci_request_regions failed\n");
        goto err_pci_request_regions;
    }

    /* Device DMA uses 64-bit addressing. */
    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (err) {
        dev_err(&pdev->dev, "dma_set_mask failed\n");
        goto err_pci_set_dma_mask;
    }

    if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
        dev_err(&pdev->dev, "invalid PCI region size\n");
        err = -EINVAL;
        goto err_pci_resource_len_check;
    }

    rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
                  pci_resource_len(pdev, 0));
    if (!rocker->hw_addr) {
        dev_err(&pdev->dev, "ioremap failed\n");
        err = -EIO;
        goto err_ioremap;
    }
    pci_set_master(pdev);

    rocker->pdev = pdev;
    pci_set_drvdata(pdev, rocker);

    /* The device itself reports the number of physical ports. */
    rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

    err = rocker_msix_init(rocker);
    if (err) {
        dev_err(&pdev->dev, "MSI-X init failed\n");
        goto err_msix_init;
    }

    err = rocker_basic_hw_test(rocker);
    if (err) {
        dev_err(&pdev->dev, "basic hw test failed\n");
        goto err_basic_hw_test;
    }

    /* Start from a known-clean hardware state. */
    rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

    err = rocker_dma_rings_init(rocker);
    if (err)
        goto err_dma_rings_init;

    err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
              rocker_cmd_irq_handler, 0,
              rocker_driver_name, rocker);
    if (err) {
        dev_err(&pdev->dev, "cannot assign cmd irq\n");
        goto err_request_cmd_irq;
    }

    err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
              rocker_event_irq_handler, 0,
              rocker_driver_name, rocker);
    if (err) {
        dev_err(&pdev->dev, "cannot assign event irq\n");
        goto err_request_event_irq;
    }

    /* Ordered workqueue serializes deferred switchdev FDB work. */
    rocker->rocker_owq = alloc_ordered_workqueue(rocker_driver_name,
                             WQ_MEM_RECLAIM);
    if (!rocker->rocker_owq) {
        err = -ENOMEM;
        goto err_alloc_ordered_workqueue;
    }

    err = rocker_probe_ports(rocker);
    if (err) {
        dev_err(&pdev->dev, "failed to probe ports\n");
        goto err_probe_ports;
    }

    /* Only FIBs pointing to our own netdevs are programmed into
     * the device, so no need to pass a callback.
     */
    rocker->fib_nb.notifier_call = rocker_router_fib_event;
    err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL);
    if (err)
        goto err_register_fib_notifier;

    err = register_switchdev_notifier(&rocker_switchdev_notifier);
    if (err) {
        dev_err(&pdev->dev, "Failed to register switchdev notifier\n");
        goto err_register_switchdev_notifier;
    }

    nb = &rocker_switchdev_blocking_notifier;
    err = register_switchdev_blocking_notifier(nb);
    if (err) {
        dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n");
        goto err_register_switchdev_blocking_notifier;
    }

    rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

    dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
         (int)sizeof(rocker->hw.id), &rocker->hw.id);

    return 0;

err_register_switchdev_blocking_notifier:
    unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
    unregister_fib_notifier(&init_net, &rocker->fib_nb);
err_register_fib_notifier:
    rocker_remove_ports(rocker);
err_probe_ports:
    destroy_workqueue(rocker->rocker_owq);
err_alloc_ordered_workqueue:
    free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
    free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
    rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
    rocker_msix_fini(rocker);
err_msix_init:
    iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
    pci_release_regions(pdev);
err_pci_request_regions:
    pci_disable_device(pdev);
err_pci_enable_device:
    kfree(rocker);
    return err;
}
3002 
/* PCI remove: tear down in (approximately) reverse order of
 * rocker_probe(): notifiers first, so no switchdev/FIB callbacks run
 * while the ports and hardware are being torn down, then ports,
 * hardware reset, workqueue, IRQs, rings, MSI-X, mappings, PCI.
 */
static void rocker_remove(struct pci_dev *pdev)
{
    struct rocker *rocker = pci_get_drvdata(pdev);
    struct notifier_block *nb;

    nb = &rocker_switchdev_blocking_notifier;
    unregister_switchdev_blocking_notifier(nb);

    unregister_switchdev_notifier(&rocker_switchdev_notifier);
    unregister_fib_notifier(&init_net, &rocker->fib_nb);
    rocker_remove_ports(rocker);
    /* Reset the device after the ports are gone, before freeing rings
     * and IRQs (presumably quiesces the hardware — mirrors the reset
     * done during probe).
     */
    rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
    destroy_workqueue(rocker->rocker_owq);
    free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
    free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
    rocker_dma_rings_fini(rocker);
    rocker_msix_fini(rocker);
    iounmap(rocker->hw_addr);
    pci_release_regions(rocker->pdev);
    pci_disable_device(rocker->pdev);
    kfree(rocker);
}
3025 
/* PCI driver glue: binds rocker_probe()/rocker_remove() to the IDs in
 * rocker_pci_id_table.
 */
static struct pci_driver rocker_pci_driver = {
    .name       = rocker_driver_name,
    .id_table   = rocker_pci_id_table,
    .probe      = rocker_probe,
    .remove     = rocker_remove,
};
3032 
3033 /************************************
3034  * Net device notifier event handler
3035  ************************************/
3036 
3037 static bool rocker_port_dev_check_under(const struct net_device *dev,
3038                     struct rocker *rocker)
3039 {
3040     struct rocker_port *rocker_port;
3041 
3042     if (!rocker_port_dev_check(dev))
3043         return false;
3044 
3045     rocker_port = netdev_priv(dev);
3046     if (rocker_port->rocker != rocker)
3047         return false;
3048 
3049     return true;
3050 }
3051 
/* Walk context for rocker_lower_dev_walk(): the rocker instance to
 * match against, and the first matching port found (NULL if none).
 */
struct rocker_walk_data {
    struct rocker *rocker;      /* in: instance to match */
    struct rocker_port *port;   /* out: first matching lower port */
};
3056 
3057 static int rocker_lower_dev_walk(struct net_device *lower_dev,
3058                  struct netdev_nested_priv *priv)
3059 {
3060     struct rocker_walk_data *data = (struct rocker_walk_data *)priv->data;
3061     int ret = 0;
3062 
3063     if (rocker_port_dev_check_under(lower_dev, data->rocker)) {
3064         data->port = netdev_priv(lower_dev);
3065         ret = 1;
3066     }
3067 
3068     return ret;
3069 }
3070 
3071 struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
3072                            struct rocker *rocker)
3073 {
3074     struct netdev_nested_priv priv;
3075     struct rocker_walk_data data;
3076 
3077     if (rocker_port_dev_check_under(dev, rocker))
3078         return netdev_priv(dev);
3079 
3080     data.rocker = rocker;
3081     data.port = NULL;
3082     priv.data = (void *)&data;
3083     netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &priv);
3084 
3085     return data.port;
3086 }
3087 
3088 static int rocker_netdevice_event(struct notifier_block *unused,
3089                   unsigned long event, void *ptr)
3090 {
3091     struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
3092     struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3093     struct netdev_notifier_changeupper_info *info;
3094     struct rocker_port *rocker_port;
3095     int err;
3096 
3097     if (!rocker_port_dev_check(dev))
3098         return NOTIFY_DONE;
3099 
3100     switch (event) {
3101     case NETDEV_CHANGEUPPER:
3102         info = ptr;
3103         if (!info->master)
3104             goto out;
3105         rocker_port = netdev_priv(dev);
3106         if (info->linking) {
3107             err = rocker_world_port_master_linked(rocker_port,
3108                                   info->upper_dev,
3109                                   extack);
3110             if (err)
3111                 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
3112                         err);
3113         } else {
3114             err = rocker_world_port_master_unlinked(rocker_port,
3115                                 info->upper_dev);
3116             if (err)
3117                 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
3118                         err);
3119         }
3120     }
3121 out:
3122     return NOTIFY_DONE;
3123 }
3124 
/* Netdevice notifier; see rocker_netdevice_event(). */
static struct notifier_block rocker_netdevice_nb __read_mostly = {
    .notifier_call = rocker_netdevice_event,
};
3128 
3129 /************************************
3130  * Net event notifier event handler
3131  ************************************/
3132 
3133 static int rocker_netevent_event(struct notifier_block *unused,
3134                  unsigned long event, void *ptr)
3135 {
3136     struct rocker_port *rocker_port;
3137     struct net_device *dev;
3138     struct neighbour *n = ptr;
3139     int err;
3140 
3141     switch (event) {
3142     case NETEVENT_NEIGH_UPDATE:
3143         if (n->tbl != &arp_tbl)
3144             return NOTIFY_DONE;
3145         dev = n->dev;
3146         if (!rocker_port_dev_check(dev))
3147             return NOTIFY_DONE;
3148         rocker_port = netdev_priv(dev);
3149         err = rocker_world_port_neigh_update(rocker_port, n);
3150         if (err)
3151             netdev_warn(dev, "failed to handle neigh update (err %d)\n",
3152                     err);
3153         break;
3154     }
3155 
3156     return NOTIFY_DONE;
3157 }
3158 
/* Netevent notifier (neighbour updates); see rocker_netevent_event(). */
static struct notifier_block rocker_netevent_nb __read_mostly = {
    .notifier_call = rocker_netevent_event,
};
3162 
3163 /***********************
3164  * Module init and exit
3165  ***********************/
3166 
3167 static int __init rocker_module_init(void)
3168 {
3169     int err;
3170 
3171     register_netdevice_notifier(&rocker_netdevice_nb);
3172     register_netevent_notifier(&rocker_netevent_nb);
3173     err = pci_register_driver(&rocker_pci_driver);
3174     if (err)
3175         goto err_pci_register_driver;
3176     return 0;
3177 
3178 err_pci_register_driver:
3179     unregister_netevent_notifier(&rocker_netevent_nb);
3180     unregister_netdevice_notifier(&rocker_netdevice_nb);
3181     return err;
3182 }
3183 
/* Module exit: notifiers are unregistered before the PCI driver, so no
 * netevent/netdevice callbacks are dispatched to rocker while the
 * devices are being removed.
 */
static void __exit rocker_module_exit(void)
{
    unregister_netevent_notifier(&rocker_netevent_nb);
    unregister_netdevice_notifier(&rocker_netdevice_nb);
    pci_unregister_driver(&rocker_pci_driver);
}
3190 
module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
/* Exposes the PCI ID table so the module can be auto-loaded on match. */
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);