#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"

#define VNIC_DVCMD_TMO		10000	/* devcmd timeout, in 100 us polling intervals */
#define VNIC_NOTIFY_INTR_MASK	0x0000ffff00000000ULL	/* intr index field (bits 47:32) of a1 */

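/*
 * State for the devcmd2 posted-command interface: commands are written to
 * a work queue ring (cmd_ring) and completions are read from a separate
 * results ring.  'next_result' and 'color' track which result entry to
 * poll next and which color value marks a fresh completion.
 */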
struct devcmd2_controller {
	struct vnic_wq_ctrl __iomem *wq_ctrl;
	struct vnic_dev_ring results_ring;
	struct vnic_wq wq;
	struct vnic_devcmd2 *cmd_ring;
	struct devcmd2_result *result;
	u16 next_result;
	u16 result_size;
	int color;
};

struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	u64 args[VNIC_DEVCMD_NARGS];
	struct devcmd2_controller *devcmd2;

	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			  int wait);
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *svnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

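/*
 * Walk the resource table that firmware exposes in BAR0 and record, per
 * resource type, its count and mapped address in vdev->res[].
 */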
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");

		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");

		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));

		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len, bar->len);

				return -EINVAL;
			}
			break;

		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;

		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}

unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
				     enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
				unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
					index * VNIC_RES_STRIDE;

	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
				      unsigned int desc_count,
				      unsigned int desc_size)
{
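	/* The base address of the descriptor rings must be 512 byte
	 * aligned.  Descriptor count is aligned to groups of 32
	 * descriptors; a count of 0 means the maximum of 4096.
	 * Descriptor size is aligned to 16 bytes.
	 */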
	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
			      unsigned int desc_count, unsigned int desc_size)
{
	svnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
		ring->size_unaligned, &ring->base_addr_unaligned,
		GFP_KERNEL);
	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);

		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned, ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	svnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev,
				  ring->size_unaligned,
				  ring->descs_unaligned,
				  ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

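/*
 * Issue one devcmd2 command: copy the command and its arguments into the
 * next free slot of the posted-command work queue, bump posted_index, and
 * (unless the command is flagged no-wait) poll the results ring until the
 * matching entry's color bit flips or the wait budget expires.
 */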
static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result = NULL;
	unsigned int i;
	int delay;
	int err;
	u32 posted;
	u32 fetch_idx;
	u32 new_posted;
	u8 color;

	fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) {
		/* register reads as all ones: device has gone away */
		return -ENODEV;
	}

	posted = ioread32(&dc2c->wq_ctrl->posted_index);
	if (posted == 0xFFFFFFFF) {
		/* register reads as all ones: device has gone away */
		return -ENODEV;
	}

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
	if (new_posted == fetch_idx) {
		pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted);

		return -EBUSY;
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if (_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}

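	/*
	 * Make sure the command descriptor is fully written out before
	 * the posted_index update makes it visible to the hardware;
	 * otherwise the device could fetch stale descriptor fields.
	 */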
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

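	/*
	 * Advance next_result (and flip the expected color when the ring
	 * wraps) exactly once per posted command, regardless of whether
	 * the command ultimately succeeds, fails, or times out.
	 */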
	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		if (result->color == color) {
			if (result->error) {
				err = (int) result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));

				return err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}

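/*
 * Bring up the devcmd2 path: allocate the posted-command work queue and
 * the results ring, hand the results ring address and size to firmware
 * with CMD_INITIALIZE_DEVCMD2, and switch vdev->devcmd_rtn over to
 * _svnic_dev_cmd2 on success.
 */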
static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = NULL;
	unsigned int fetch_idx;
	int ret;
	void __iomem *p;

	if (vdev->devcmd2)
		return 0;

	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (!p)
		return -ENODEV;

	dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
	if (!dc2c)
		return -ENOMEM;

	vdev->devcmd2 = dc2c;

	dc2c->color = 1;
	dc2c->result_size = DEVCMD2_RING_SIZE;

	ret = vnic_wq_devcmd2_alloc(vdev, &dc2c->wq, DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_devcmd2;

	fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) {
		/* register reads as all ones: start from index 0 */
		fetch_idx = 0;
	}

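	/*
	 * Start the WQ with posted_index equal to the current fetch_index
	 * so the device sees an empty ring and no stale descriptors are
	 * fetched.
	 */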
	vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
	svnic_wq_enable(&dc2c->wq);
	ret = svnic_dev_alloc_desc_ring(vdev, &dc2c->results_ring,
					DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_wq;

	dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
	dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
	dc2c->wq_ctrl = dc2c->wq.ctrl;
	vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
	if (ret < 0)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = &_svnic_dev_cmd2;
	pr_info("DEVCMD2 Initialized.\n");

	return ret;

err_free_desc_ring:
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);

err_free_wq:
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);

err_free_devcmd2:
	kfree(dc2c);
	vdev->devcmd2 = NULL;

	return ret;
}

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;

	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = NULL;

	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);
	kfree(dc2c);
}

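/*
 * Common devcmd entry point: a0/a1 are copied into vdev->args before the
 * command is dispatched through devcmd_rtn, and copied back out afterwards
 * so callers see any values returned by the firmware.
 */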
int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));
	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

int svnic_dev_fw_info(struct vnic_dev *vdev,
		      struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, GFP_KERNEL);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only fetch fw_info once and cache it */
		err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
		   unsigned int size, void *value)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;
	int err;

	a0 = offset;
	a1 = size;

	err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int svnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	if (!vdev->stats) {
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int svnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int svnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err = 0;

	err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

	return err;
}

int svnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int svnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err;

	*done = 0;

	err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

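/*
 * Tell the device where to DMA its notify block.  a1 carries the interrupt
 * index in bits 47:32 (VNIC_NOTIFY_INTR_MASK) plus the size of the notify
 * structure in the low bits.
 */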
int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	if (!vdev->notify) {
		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa, GFP_KERNEL);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
	a1 += sizeof(struct vnic_devcmd_notify);

	return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

void svnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	a0 = 0;	/* paddr = 0 to unset the notify buffer */
	a1 = VNIC_NOTIFY_INTR_MASK;	/* intr mask to unregister the intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

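/*
 * The device updates the notify block asynchronously, so copy it and
 * verify the embedded checksum (word 0 is the sum of the remaining
 * words); retry until a consistent snapshot is captured.
 */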
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
		       sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int svnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

int svnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
			     enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

void svnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_devcmd_notify),
					  vdev->notify, vdev->notify_pa);
		if (vdev->linkstatus)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(u32),
					  vdev->linkstatus, vdev->linkstatus_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_stats),
					  vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_devcmd_fw_info),
					  vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);
		kfree(vdev);
	}
}

struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
					  void *priv,
					  struct pci_dev *pdev,
					  struct vnic_dev_bar *bar,
					  unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	svnic_dev_unregister(vdev);

	return NULL;
}

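/*
 * The fallback argument is accepted to keep the init interface common
 * with other vNIC drivers; only the devcmd2 path is supported here.
 */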
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
{
	int err = -ENODEV;
	void __iomem *p;

	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (p)
		err = svnic_dev_init_devcmd2(vdev);
	else
		pr_err("DEVCMD2 resource not found.\n");

	return err;
}