// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) ST-Ericsson AB 2013
 * Authors: Vicram Arv
 *	    Dan Martensson
 *	    Sjur Brendeland
 */
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_caif.h>
#include <linux/virtio_ring.h>
#include <linux/dma-mapping.h>
#include <net/caif/caif_dev.h>
#include <linux/virtio_config.h>

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vicram Arv");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("Virtio CAIF Driver");

/* NAPI schedule quota */
#define CFV_DEFAULT_QUOTA 32

/* Defaults used if virtio config space is unavailable */
#define CFV_DEF_MTU_SIZE 4096
#define CFV_DEF_HEADROOM 32
#define CFV_DEF_TAILROOM 32

/* Required IP header alignment */
#define IP_HDR_ALIGN 4

/* struct cfv_napi_context - NAPI context info
 * @riov: IOV holding data read from the RX ring. Note that riov may
 *	  still hold data when cfv_rx_poll() returns.
 * @head: Last descriptor ID we received from vringh_getdesc_kern().
 *	  We use this to put the descriptor back on the used ring.
 *	  USHRT_MAX indicates an invalid head-id.
 */
struct cfv_napi_context {
	struct vringh_kiov riov;
	unsigned short head;
};

/* struct cfv_stats - statistics exposed in debugfs
 * @rx_napi_complete:	Number of NAPI completions (RX)
 * @rx_napi_resched:	Number of polls where the full quota was used (RX)
 * @rx_nomem:		Number of SKB allocation failures (RX)
 * @rx_kicks:		Number of RX kicks
 * @tx_full_ring:	Number of times the TX ring was full
 * @tx_no_mem:		Number of times TX went out of memory
 * @tx_flow_on:		Number of flow-on events (TX)
 * @tx_kicks:		Number of TX kicks
 */
struct cfv_stats {
	u32 rx_napi_complete;
	u32 rx_napi_resched;
	u32 rx_nomem;
	u32 rx_kicks;
	u32 tx_full_ring;
	u32 tx_no_mem;
	u32 tx_flow_on;
	u32 tx_kicks;
};

/* struct cfv_info - CAIF virtio control structure
 * @cfdev:	CAIF common header
 * @vdev:	Associated virtio device
 * @vr_rx:	RX/downlink host vring
 * @vq_tx:	TX/uplink virtqueue
 * @ndev:	CAIF link layer device
 * @watermark_tx: number of free descriptors needed before reopening
 *		the TX queues after overload
 * @tx_lock:	protects vq_tx from concurrent use
 * @tx_release_tasklet: tasklet for freeing consumed TX buffers
 * @napi:	NAPI context used in cfv_rx_poll()
 * @ctx:	context data used in cfv_rx_poll()
 * @tx_hr:	transmit headroom
 * @rx_hr:	receive headroom
 * @tx_tr:	transmit tailroom
 * @rx_tr:	receive tailroom
 * @mtu:	transmit max size
 * @mru:	receive max size
 * @allocsz:	size of DMA memory reserved for TX buffers
 * @alloc_addr:	virtual address of DMA memory for TX buffers
 * @alloc_dma:	DMA address of memory for TX buffers
 * @genpool:	gen_pool used for allocating TX buffers
 * @reserved_mem: pointer to the memory reserve
 * @reserved_size: size of the memory reserve
 * @stats:	statistics exposed in debugfs
 * @debugfs:	debugfs dentry for the statistic counters
 */
struct cfv_info {
	struct caif_dev_common cfdev;
	struct virtio_device *vdev;
	struct vringh *vr_rx;
	struct virtqueue *vq_tx;
	struct net_device *ndev;
	unsigned int watermark_tx;
	/* Protects access to vq_tx */
	spinlock_t tx_lock;
	struct tasklet_struct tx_release_tasklet;
	struct napi_struct napi;
	struct cfv_napi_context ctx;
	u16 tx_hr;
	u16 rx_hr;
	u16 tx_tr;
	u16 rx_tr;
	u32 mtu;
	u32 mru;
	size_t allocsz;
	void *alloc_addr;
	dma_addr_t alloc_dma;
	struct gen_pool *genpool;
	unsigned long reserved_mem;
	size_t reserved_size;
	struct cfv_stats stats;
	struct dentry *debugfs;
};

/* struct buf_info - maintains a transmit buffer data handle
 * @size:	size of the transmit buffer
 * @vaddr:	virtual address mapping to the allocated memory area
 */
struct buf_info {
	size_t size;
	u8 *vaddr;
};
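
/* Called from the virtio device, in IRQ context */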
static void cfv_release_cb(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;

	++cfv->stats.tx_kicks;
	tasklet_schedule(&cfv->tx_release_tasklet);
}

static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
{
	if (!buf_info)
		return;
	gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
		      buf_info->size);
	kfree(buf_info);
}
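
/* This is invoked whenever the remote processor has completed processing
 * a TX msg we just sent, and the buffer is put back on the used ring.
 */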
static void cfv_release_used_buf(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;
	unsigned long flags;

	BUG_ON(vq_tx != cfv->vq_tx);

	for (;;) {
		unsigned int len;
		struct buf_info *buf_info;

		/* Get used buffer from used ring to recycle used descriptors */
		spin_lock_irqsave(&cfv->tx_lock, flags);
		buf_info = virtqueue_get_buf(vq_tx, &len);
		spin_unlock_irqrestore(&cfv->tx_lock, flags);

		/* Stop looping if there are no more buffers to free */
		if (!buf_info)
			break;

		free_buf_info(cfv, buf_info);

		/* watermark_tx indicates if we previously stopped the tx
		 * queues. If we have enough free slots in the virtio ring,
		 * re-establish the memory reserve and open up the tx queues.
		 */
		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
			continue;

		/* Re-establish the memory reserve */
		if (cfv->reserved_mem == 0 && cfv->genpool)
			cfv->reserved_mem =
				gen_pool_alloc(cfv->genpool,
					       cfv->reserved_size);

		/* Open up the tx queues */
		if (cfv->reserved_mem) {
			cfv->watermark_tx =
				virtqueue_get_vring_size(cfv->vq_tx);
			netif_tx_wake_all_queues(cfv->ndev);
			/* Buffers are recycled in cfv_netdev_tx, so
			 * disable notifications when queues are opened.
			 */
			virtqueue_disable_cb(cfv->vq_tx);
			++cfv->stats.tx_flow_on;
		} else {
			/* If there is no memory reserve, wait for more
			 * free slots.
			 */
			WARN_ON(cfv->watermark_tx >
				virtqueue_get_vring_size(cfv->vq_tx));
			cfv->watermark_tx +=
				virtqueue_get_vring_size(cfv->vq_tx) / 4;
		}
	}
}
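
/* Allocate an SKB and copy the received packet data into it */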
static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
					      struct cfv_info *cfv,
					      u8 *frm, u32 frm_len)
{
	struct sk_buff *skb;
	u32 cfpkt_len, pad_len;

	*err = 0;

	/* Verify that the frame fits within the MRU and holds more than
	 * the receive head- and tailroom.
	 */
	if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
		netdev_err(cfv->ndev,
			   "Invalid frmlen:%u mru:%u hr:%d tr:%d\n",
			   frm_len, cfv->mru, cfv->rx_hr,
			   cfv->rx_tr);
		*err = -EPROTO;
		return NULL;
	}

	/* Strip the receive head- and tailroom, and compute the padding
	 * needed to keep the IP header IP_HDR_ALIGN-aligned.
	 */
	cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
	pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);

	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
	if (!skb) {
		*err = -ENOMEM;
		return NULL;
	}

	skb_reserve(skb, cfv->rx_hr + pad_len);

	skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
	return skb;
}
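
/* NAPI poll function: get packets from the host vring and deliver
 * them up the stack.
 */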
static int cfv_rx_poll(struct napi_struct *napi, int quota)
{
	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
	int rxcnt = 0;
	int err = 0;
	void *buf;
	struct sk_buff *skb;
	struct vringh_kiov *riov = &cfv->ctx.riov;
	unsigned int skb_len;

	do {
		skb = NULL;

		/* Put the previous iovec back on the used ring and
		 * fetch a new iovec if we have processed all elements.
		 */
		if (riov->i == riov->used) {
			if (cfv->ctx.head != USHRT_MAX) {
				vringh_complete_kern(cfv->vr_rx,
						     cfv->ctx.head,
						     0);
				cfv->ctx.head = USHRT_MAX;
			}

			err = vringh_getdesc_kern(
				cfv->vr_rx,
				riov,
				NULL,
				&cfv->ctx.head,
				GFP_ATOMIC);

			if (err <= 0)
				goto exit;
		}

		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);

		/* Copy the frame out of the shared memory into an SKB */
		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
					     riov->iov[riov->i].iov_len);
		if (unlikely(err))
			goto exit;

		/* Push the received packet up the stack */
		skb_len = skb->len;
		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfv->ndev;
		err = netif_receive_skb(skb);
		if (unlikely(err)) {
			++cfv->ndev->stats.rx_dropped;
		} else {
			++cfv->ndev->stats.rx_packets;
			cfv->ndev->stats.rx_bytes += skb_len;
		}

		++riov->i;
		++rxcnt;
	} while (rxcnt < quota);

	++cfv->stats.rx_napi_resched;
	goto out;

exit:
	switch (err) {
	case 0:
		++cfv->stats.rx_napi_complete;

		/* Really out of packets? Re-enable notifications and
		 * reschedule if more arrived in the meantime.
		 */
		napi_complete(napi);
		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
		    napi_schedule_prep(napi)) {
			vringh_notify_disable_kern(cfv->vr_rx);
			__napi_schedule(napi);
		}
		break;

	case -ENOMEM:
		++cfv->stats.rx_nomem;
		dev_kfree_skb(skb);
		/* Stop NAPI poll on OOM, we hope to be polled later */
		napi_complete(napi);
		vringh_notify_enable_kern(cfv->vr_rx);
		break;

	default:
		/* We're doomed, any modem fault is fatal */
		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
		napi_complete(napi);
		vringh_notify_disable_kern(cfv->vr_rx);
		netif_carrier_off(cfv->ndev);
		break;
	}
out:
	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
		vringh_notify(cfv->vr_rx);
	return rxcnt;
}
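
/* RX kick callback: the modem has put data on the RX vring.
 * Defer processing to NAPI.
 */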
static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
{
	struct cfv_info *cfv = vdev->priv;

	++cfv->stats.rx_kicks;
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_schedule(&cfv->napi);
}

static void cfv_destroy_genpool(struct cfv_info *cfv)
{
	if (cfv->alloc_addr)
		dma_free_coherent(cfv->vdev->dev.parent->parent,
				  cfv->allocsz, cfv->alloc_addr,
				  cfv->alloc_dma);

	if (!cfv->genpool)
		return;
	gen_pool_free(cfv->genpool, cfv->reserved_mem,
		      cfv->reserved_size);
	gen_pool_destroy(cfv->genpool);
	cfv->genpool = NULL;
}
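
/* Set up the gen_pool of DMA-coherent memory used for TX buffers */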
static int cfv_create_genpool(struct cfv_info *cfv)
{
	int err;

	/* dma_alloc can only allocate whole pages, and we need a more
	 * fine-grained allocation, so we use genpool. We ask for space
	 * needed by IP and a full ring. If the dma allocation fails, we
	 * retry with a smaller allocation size.
	 */
	err = -ENOMEM;
	cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
	if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
		return -EINVAL;

	for (;;) {
		if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
			netdev_info(cfv->ndev, "Not enough device memory\n");
			return -ENOMEM;
		}

		cfv->alloc_addr = dma_alloc_coherent(
						cfv->vdev->dev.parent->parent,
						cfv->allocsz, &cfv->alloc_dma,
						GFP_ATOMIC);
		if (cfv->alloc_addr)
			break;

		cfv->allocsz = (cfv->allocsz * 3) >> 2;
	}

	netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
		   cfv->allocsz);

	/* Allocate on 128-byte boundaries (1 << 7) */
	cfv->genpool = gen_pool_create(7, -1);
	if (!cfv->genpool)
		goto err;

	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
				cfv->allocsz, -1);
	if (err)
		goto err;

	/* Reserve some memory for low memory situations. If we hit the
	 * roof in the memory pool, we stop TX flow and release the reserve.
	 */
	cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
					   cfv->reserved_size);
	if (!cfv->reserved_mem) {
		err = -ENOMEM;
		goto err;
	}

	cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
	return 0;
err:
	cfv_destroy_genpool(cfv);
	return err;
}
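
/* Enable the CAIF interface. Called on ifconfig up */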
static int cfv_netdev_open(struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);

	if (cfv_create_genpool(cfv))
		return -ENOMEM;

	netif_carrier_on(netdev);
	napi_enable(&cfv->napi);

	/* Schedule NAPI to read any pending packets */
	napi_schedule(&cfv->napi);
	return 0;
}
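
/* Disable the CAIF interface. Called on ifconfig down */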
static int cfv_netdev_close(struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	unsigned long flags;
	struct buf_info *buf_info;

	/* Disable interrupts, queues and NAPI polling */
	netif_carrier_off(netdev);
	virtqueue_disable_cb(cfv->vq_tx);
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_disable(&cfv->napi);

	/* Release any TX buffers on both used and available rings */
	cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);
	while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
		free_buf_info(cfv, buf_info);
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* Release all dma allocated memory and destroy the pool */
	cfv_destroy_genpool(cfv);
	return 0;
}
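
/* Allocate a buffer in the shared dma memory pool and copy the
 * packet data into it.
 */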
static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
						  struct sk_buff *skb,
						  struct scatterlist *sg)
{
	struct caif_payload_info *info = (void *)&skb->cb;
	struct buf_info *buf_info = NULL;
	u8 pad_len, hdr_ofs;

	if (!cfv->genpool)
		goto err;

	if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
		netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
			    cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
		goto err;
	}

	buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
	if (unlikely(!buf_info))
		goto err;

	/* Make the IP header aligned in the buffer */
	hdr_ofs = cfv->tx_hr + info->hdr_len;
	pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
	buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;

	/* Allocate the dma memory buffer */
	buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
	if (unlikely(!buf_info->vaddr))
		goto err;

	/* Copy the skb contents into the send buffer; the scatterlist
	 * covers headroom, payload and tailroom.
	 */
	skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
	sg_init_one(sg, buf_info->vaddr + pad_len,
		    skb->len + cfv->tx_hr + cfv->tx_tr);

	return buf_info;
err:
	kfree(buf_info);
	return NULL;
}
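
/* Put the CAIF packet on the virtio ring and kick the receiver */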
static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	struct buf_info *buf_info;
	struct scatterlist sg;
	unsigned long flags;
	bool flow_off = false;
	int ret;

	/* Garbage collect released buffers */
	cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);

	/* The flow-off check takes the number of cpus into account to make
	 * sure the virtqueue will not be overfilled in any possible smp
	 * conditions.
	 *
	 * Flow-on is triggered when sufficient buffers are freed.
	 */
	if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
		flow_off = true;
		cfv->stats.tx_full_ring++;
	}

	/* If we run out of memory, we release the memory reserve and retry
	 * the allocation.
	 */
	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
	if (unlikely(!buf_info)) {
		cfv->stats.tx_no_mem++;
		flow_off = true;

		if (cfv->reserved_mem && cfv->genpool) {
			gen_pool_free(cfv->genpool, cfv->reserved_mem,
				      cfv->reserved_size);
			cfv->reserved_mem = 0;
			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
		}
	}

	if (unlikely(flow_off)) {
		/* Turn flow on when a 1/4 of the descriptors are released */
		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
		/* Enable notifications of recycled TX buffers */
		virtqueue_enable_cb(cfv->vq_tx);
		netif_tx_stop_all_queues(netdev);
	}

	if (unlikely(!buf_info)) {
		/* If the memory reserve does its job, this shouldn't happen */
		netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
		goto err;
	}

	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
	if (unlikely(ret < 0)) {
		/* If flow control works, this shouldn't happen */
		netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
			    ret);
		goto err;
	}

	/* Update netdev statistics */
	cfv->ndev->stats.tx_packets++;
	cfv->ndev->stats.tx_bytes += skb->len;
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* Tell the remote processor it has a pending message to read */
	virtqueue_kick(cfv->vq_tx);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
err:
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
	cfv->ndev->stats.tx_dropped++;
	free_buf_info(cfv, buf_info);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
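
/* Tasklet bottom half: recycle used TX buffers outside IRQ context */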
static void cfv_tx_release_tasklet(struct tasklet_struct *t)
{
	struct cfv_info *cfv = from_tasklet(cfv, t, tx_release_tasklet);

	cfv_release_used_buf(cfv->vq_tx);
}

static const struct net_device_ops cfv_netdev_ops = {
	.ndo_open = cfv_netdev_open,
	.ndo_stop = cfv_netdev_close,
	.ndo_start_xmit = cfv_netdev_tx,
};

static void cfv_netdev_setup(struct net_device *netdev)
{
	netdev->netdev_ops = &cfv_netdev_ops;
	netdev->type = ARPHRD_CAIF;
	netdev->tx_queue_len = 100;
	netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
	netdev->mtu = CFV_DEF_MTU_SIZE;
	netdev->needs_free_netdev = true;
}

/* Create debugfs counters for the device */
static inline void debugfs_init(struct cfv_info *cfv)
{
	cfv->debugfs = debugfs_create_dir(netdev_name(cfv->ndev), NULL);

	debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
			   &cfv->stats.rx_napi_complete);
	debugfs_create_u32("rx-napi-resched", 0400, cfv->debugfs,
			   &cfv->stats.rx_napi_resched);
	debugfs_create_u32("rx-nomem", 0400, cfv->debugfs,
			   &cfv->stats.rx_nomem);
	debugfs_create_u32("rx-kicks", 0400, cfv->debugfs,
			   &cfv->stats.rx_kicks);
	debugfs_create_u32("tx-full-ring", 0400, cfv->debugfs,
			   &cfv->stats.tx_full_ring);
	debugfs_create_u32("tx-no-mem", 0400, cfv->debugfs,
			   &cfv->stats.tx_no_mem);
	debugfs_create_u32("tx-kicks", 0400, cfv->debugfs,
			   &cfv->stats.tx_kicks);
	debugfs_create_u32("tx-flow-on", 0400, cfv->debugfs,
			   &cfv->stats.tx_flow_on);
}
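
/* Set up CAIF for a virtio device */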
static int cfv_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs = cfv_release_cb;
	vrh_callback_t *vrh_cbs = cfv_recv;
	const char *names = "output";
	const char *cfv_netdev_name = "cfvrt";
	struct net_device *netdev;
	struct cfv_info *cfv;
	int err;

	netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
			      NET_NAME_UNKNOWN, cfv_netdev_setup);
	if (!netdev)
		return -ENOMEM;

	cfv = netdev_priv(netdev);
	cfv->vdev = vdev;
	cfv->ndev = netdev;

	spin_lock_init(&cfv->tx_lock);

	/* Get the RX virtio ring. This is a "host side vring". */
	err = -ENODEV;
	if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs)
		goto err;

	err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
	if (err)
		goto err;

	/* Get the TX virtio ring. This is a "guest side vring". */
	err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL);
	if (err)
		goto err;

	/* Get the CAIF configuration from virtio config space, if available */
	if (vdev->config->get) {
		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
			     &cfv->tx_hr);
		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
			     &cfv->rx_hr);
		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
			     &cfv->tx_tr);
		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
			     &cfv->rx_tr);
		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
			     &cfv->mtu);
		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
			     &cfv->mru);
	} else {
		cfv->tx_hr = CFV_DEF_HEADROOM;
		cfv->rx_hr = CFV_DEF_HEADROOM;
		cfv->tx_tr = CFV_DEF_TAILROOM;
		cfv->rx_tr = CFV_DEF_TAILROOM;
		cfv->mtu = CFV_DEF_MTU_SIZE;
		cfv->mru = CFV_DEF_MTU_SIZE;
	}

	netdev->needed_headroom = cfv->tx_hr;
	netdev->needed_tailroom = cfv->tx_tr;

	/* Disable buffer release interrupts unless we have stopped TX queues */
	virtqueue_disable_cb(cfv->vq_tx);

	netdev->mtu = cfv->mtu - cfv->tx_tr;
	vdev->priv = cfv;

	/* Initialize NAPI poll context data */
	vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
	cfv->ctx.head = USHRT_MAX;
	netif_napi_add_weight(netdev, &cfv->napi, cfv_rx_poll,
			      CFV_DEFAULT_QUOTA);

	tasklet_setup(&cfv->tx_release_tasklet, cfv_tx_release_tasklet);

	/* Carrier is off until the netdevice is opened */
	netif_carrier_off(netdev);

	/* Serialize netdev register + virtio_device_ready() with ndo_open() */
	rtnl_lock();

	/* Register the netdev */
	err = register_netdevice(netdev);
	if (err) {
		rtnl_unlock();
		dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
		goto err;
	}

	virtio_device_ready(vdev);

	rtnl_unlock();

	debugfs_init(cfv);

	return 0;
err:
	netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err);

	if (cfv->vr_rx)
		vdev->vringh_config->del_vrhs(cfv->vdev);
	if (cfv->vdev)
		vdev->config->del_vqs(cfv->vdev);
	free_netdev(netdev);
	return err;
}
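
/* Tear the device down: close the interface, stop the tasklet and
 * release the rings before unregistering the netdev.
 */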
static void cfv_remove(struct virtio_device *vdev)
{
	struct cfv_info *cfv = vdev->priv;

	rtnl_lock();
	dev_close(cfv->ndev);
	rtnl_unlock();

	tasklet_kill(&cfv->tx_release_tasklet);
	debugfs_remove_recursive(cfv->debugfs);

	vringh_kiov_cleanup(&cfv->ctx.riov);
	virtio_reset_device(vdev);
	vdev->vringh_config->del_vrhs(cfv->vdev);
	cfv->vr_rx = NULL;
	vdev->config->del_vqs(cfv->vdev);
	unregister_netdev(cfv->ndev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
};

static struct virtio_driver caif_virtio_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = cfv_probe,
	.remove = cfv_remove,
};

module_virtio_driver(caif_virtio_driver);
MODULE_DEVICE_TABLE(virtio, id_table);