0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/module.h>
0011 #include <linux/idr.h>
0012 #include <linux/slab.h>
0013 #include <linux/vdpa.h>
0014 #include <uapi/linux/vdpa.h>
0015 #include <net/genetlink.h>
0016 #include <linux/mod_devicetable.h>
0017 #include <linux/virtio_ids.h>
0018
/* List of registered management devices; additions/removals and walks are
 * serialized by vdpa_dev_lock. */
static LIST_HEAD(mdev_head);

/* Serializes device add/remove, mgmtdev add/remove, and netlink queries. */
static DECLARE_RWSEM(vdpa_dev_lock);
/* Allocator for the per-device "vdpa%u" index. */
static DEFINE_IDA(vdpa_index_ida);
0023
/**
 * vdpa_set_status - update the device status byte
 * @vdev: vdpa device
 * @status: new status value handed to the config ops
 *
 * Takes cf_lock for write so the status change is serialized against
 * config-space accessors that take the same lock (vdpa_get_config() etc.).
 */
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_status(vdev, status);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
0031
0032 static struct genl_family vdpa_nl_family;
0033
/*
 * Bus probe callback: sanity-check the device's advertised virtqueue size
 * range before handing the device to the matched vdpa driver.
 */
static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;	/* min defaults to 1 if the op is absent */
	int ret = 0;

	/* Reject devices advertising an impossible vq size range. */
	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}
0053
/* Bus remove callback: forward to the bound driver's remove hook, if any. */
static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}
0062
0063 static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
0064 {
0065 struct vdpa_device *vdev = dev_to_vdpa(dev);
0066
0067
0068 if (vdev->driver_override)
0069 return strcmp(vdev->driver_override, drv->name) == 0;
0070
0071
0072 return 1;
0073 }
0074
0075 static ssize_t driver_override_store(struct device *dev,
0076 struct device_attribute *attr,
0077 const char *buf, size_t count)
0078 {
0079 struct vdpa_device *vdev = dev_to_vdpa(dev);
0080 int ret;
0081
0082 ret = driver_set_override(dev, &vdev->driver_override, buf, count);
0083 if (ret)
0084 return ret;
0085
0086 return count;
0087 }
0088
/* sysfs show for driver_override; device_lock serializes against a
 * concurrent driver_override_store() replacing the string. */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);

/* Per-device sysfs attributes exposed by the vdpa bus. */
static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);

/* The vdpa bus: match/probe/remove glue between devices and drivers. */
static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};
0120
0121 static void vdpa_release_dev(struct device *d)
0122 {
0123 struct vdpa_device *vdev = dev_to_vdpa(d);
0124 const struct vdpa_config_ops *ops = vdev->config;
0125
0126 if (ops->free)
0127 ops->free(vdev);
0128
0129 ida_simple_remove(&vdpa_index_ida, vdev->index);
0130 kfree(vdev->driver_override);
0131 kfree(vdev);
0132 }
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
0153 const struct vdpa_config_ops *config,
0154 unsigned int ngroups, unsigned int nas,
0155 size_t size, const char *name,
0156 bool use_va)
0157 {
0158 struct vdpa_device *vdev;
0159 int err = -EINVAL;
0160
0161 if (!config)
0162 goto err;
0163
0164 if (!!config->dma_map != !!config->dma_unmap)
0165 goto err;
0166
0167
0168 if (use_va && !(config->dma_map || config->set_map))
0169 goto err;
0170
0171 err = -ENOMEM;
0172 vdev = kzalloc(size, GFP_KERNEL);
0173 if (!vdev)
0174 goto err;
0175
0176 err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
0177 if (err < 0)
0178 goto err_ida;
0179
0180 vdev->dev.bus = &vdpa_bus;
0181 vdev->dev.parent = parent;
0182 vdev->dev.release = vdpa_release_dev;
0183 vdev->index = err;
0184 vdev->config = config;
0185 vdev->features_valid = false;
0186 vdev->use_va = use_va;
0187 vdev->ngroups = ngroups;
0188 vdev->nas = nas;
0189
0190 if (name)
0191 err = dev_set_name(&vdev->dev, "%s", name);
0192 else
0193 err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
0194 if (err)
0195 goto err_name;
0196
0197 init_rwsem(&vdev->cf_lock);
0198 device_initialize(&vdev->dev);
0199
0200 return vdev;
0201
0202 err_name:
0203 ida_simple_remove(&vdpa_index_ida, vdev->index);
0204 err_ida:
0205 kfree(vdev);
0206 err:
0207 return ERR_PTR(err);
0208 }
0209 EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
0210
/* bus_find_device() predicate: match a vdpa device by its device name. */
static int vdpa_name_match(struct device *dev, const void *data)
{
	const char *name = data;

	return !strcmp(dev_name(dev), name);
}
0217
/*
 * Add @vdev to the bus after verifying no device with the same name is
 * already registered. Caller must hold vdpa_dev_lock (asserted below).
 */
static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		/* Drop the reference bus_find_device() took on the duplicate. */
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
/**
 * _vdpa_register_device - register a vDPA device created by a management
 * device (vdev->mdev must be set)
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Takes no lock itself: __vdpa_register_device() asserts vdpa_dev_lock,
 * which the mgmtdev dev_add() path already holds (see
 * vdpa_nl_cmd_dev_add_set_doit()).
 *
 * Return: 0 on success, negative errno when the device cannot be added.
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);
0250 EXPORT_SYMBOL_GPL(_vdpa_register_device);
0251
0252
0253
0254
0255
0256
0257
0258
0259
/**
 * vdpa_register_device - register a vDPA device
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Takes vdpa_dev_lock itself; for devices created through a management
 * device use _vdpa_register_device() instead.
 *
 * Return: 0 on success, negative errno when the device cannot be added.
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
0269 EXPORT_SYMBOL_GPL(vdpa_register_device);
0270
0271
0272
0273
0274
0275
0276
/**
 * _vdpa_unregister_device - unregister a vDPA device owned by a management
 * device
 * @vdev: the vdpa device to be unregistered
 *
 * Caller must hold vdpa_dev_lock (asserted); the mgmtdev dev_del() paths in
 * this file take it before calling in.
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
0283 EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
0284
0285
0286
0287
0288
/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered
 *
 * Takes vdpa_dev_lock itself; for mgmtdev-owned devices use
 * _vdpa_unregister_device() instead.
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
0295 EXPORT_SYMBOL_GPL(vdpa_unregister_device);
0296
0297
0298
0299
0300
0301
0302
0303
/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Drivers normally use the vdpa_register_driver() wrapper macro which
 * supplies THIS_MODULE as @owner.
 *
 * Return: 0 on success, negative errno from driver_register() on failure.
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);
0311 EXPORT_SYMBOL_GPL(__vdpa_register_driver);
0312
0313
0314
0315
0316
/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
0321 EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
/**
 * vdpa_mgmtdev_register - register a vdpa management device
 * @mdev: management device to register; must provide a backing struct
 *        device and the dev_add/dev_del ops
 *
 * Adds @mdev to the global mdev_head list under vdpa_dev_lock so netlink
 * queries can find it.
 *
 * Return: 0 on success, -EINVAL when mandatory fields are missing.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
0343 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
0344
/*
 * bus_for_each_dev() callback: delete the vdpa device through its owning
 * management device if it belongs to the mgmtdev passed in @data.
 */
static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}
0354
/**
 * vdpa_mgmtdev_unregister - unregister a vdpa management device
 * @mdev: management device to remove
 *
 * Unlinks @mdev from the global list and deletes every vdpa device it
 * still owns, all under vdpa_dev_lock so no new devices can be added
 * through it concurrently.
 */
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Filter out the devices belonging to this mgmtdev and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
0366 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
0367
/*
 * Read from the device config space without taking cf_lock; callers hold
 * the lock themselves (vdpa_get_config()) or are on paths that already do.
 *
 * If features were never negotiated, negotiate an empty feature set first —
 * presumably to cover legacy guests that access config space before
 * FEATURES_OK; confirm against the virtio spec/driver expectations.
 */
static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	if (!vdev->features_valid)
		vdpa_set_features_unlocked(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}
0382
0383
0384
0385
0386
0387
0388
0389
/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
0398
0399
0400
0401
0402
0403
0404
0405
/**
 * vdpa_set_config - Set one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read from
 * @length: length of the configuration fields in bytes
 */
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
		     const void *buf, unsigned int length)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_config(vdev, offset, buf, length);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
0414
0415 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
0416 const char *busname, const char *devname)
0417 {
0418
0419
0420
0421 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
0422 return false;
0423
0424 if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
0425 return true;
0426
0427 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
0428 (strcmp(dev_name(mdev->device), devname) == 0))
0429 return true;
0430
0431 return false;
0432 }
0433
/*
 * Look up a management device by the bus/device name netlink attributes.
 * Walks mdev_head, so the caller must hold vdpa_dev_lock (all callers in
 * this file do).
 *
 * Return: matching mgmtdev, ERR_PTR(-EINVAL) when the device-name attribute
 * is missing, ERR_PTR(-ENODEV) when nothing matches.
 */
static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}
0452
/* Emit the mgmtdev "handle" (optional bus name + device name) attributes. */
static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}
0462
/*
 * Fill one VDPA_CMD_MGMTDEV_NEW message describing @mdev: its handle, the
 * virtio device classes it can create, max vq count, and supported features.
 */
static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	/* Collect the supported virtio ids into a class bitmap; ids above 63
	 * cannot be represented in the u64 and are skipped. */
	while (mdev->id_table[i].device) {
		if (mdev->id_table[i].device <= 63)
			supported_classes |= BIT_ULL(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}
0507
/* VDPA_CMD_MGMTDEV_GET doit: reply with the description of one mgmtdev. */
static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		up_read(&vdpa_dev_lock);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	up_read(&vdpa_dev_lock);
	if (err)
		goto out;
	/* genlmsg_reply() consumes @msg even on failure, so no free here. */
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}
0538
/*
 * VDPA_CMD_MGMTDEV_GET dumpit: emit one record per registered mgmtdev,
 * resuming from the index saved in cb->args[0] on the previous pass.
 */
static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	down_read(&vdpa_dev_lock);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	up_read(&vdpa_dev_lock);
	cb->args[0] = idx;	/* resume point for the next dump call */
	return msg->len;
}
0564
0565 #define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
0566 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
0567 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
0568
/*
 * VDPA_CMD_DEV_NEW doit: create a vdpa device on a management device.
 * Parses the optional net config attributes (mac/mtu/max vq pairs), checks
 * permissions, validates the mgmtdev supports the requested attributes and
 * delegates creation to its dev_add() op under vdpa_dev_lock.
 */
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	const u8 *macaddr;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}

	/* Setting net-specific device config needs net admin capability. */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	down_write(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}
	/* Every requested attribute must be in the mgmtdev's supported mask. */
	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "All provided attributes are not supported");
		err = -EOPNOTSUPP;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	up_write(&vdpa_dev_lock);
	return err;
}
0632
/*
 * VDPA_CMD_DEV_DEL doit: delete a vdpa device by name through its owning
 * management device. Devices not created via a mgmtdev cannot be deleted
 * from userspace.
 */
static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	down_write(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);	/* drop the bus_find_device() reference */
dev_err:
	up_write(&vdpa_dev_lock);
	return err;
}
0666
/*
 * Fill one VDPA_CMD_DEV_NEW message describing @vdev: mgmtdev handle,
 * name, device/vendor ids, vq count and vq size bounds.
 */
static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;	/* default when get_vq_num_min is absent */
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}
0713
/* VDPA_CMD_DEV_GET doit: reply with the description of one managed device. */
static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		/* only mgmtdev-created devices are visible via netlink */
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (err)
		goto mdev_err;

	/* genlmsg_reply() consumes @msg even on failure, so no free below. */
	err = genlmsg_reply(msg, info);
	put_device(dev);
	up_read(&vdpa_dev_lock);
	return err;

mdev_err:
	put_device(dev);
err:
	up_read(&vdpa_dev_lock);
	nlmsg_free(msg);
	return err;
}
0757
/* State threaded through bus_for_each_dev() during a netlink dump. */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;		/* dump message being filled */
	struct netlink_callback *cb;	/* netlink dump context */
	int start_idx;			/* first index to emit (resume point) */
	int idx;			/* current device index */
};
0764
/*
 * bus_for_each_dev() callback for VDPA_CMD_DEV_GET dump: emit one record
 * per mgmtdev-created device, skipping entries before the resume index.
 */
static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}
0785
/* VDPA_CMD_DEV_GET dumpit: walk the bus, resuming from cb->args[0]. */
static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;	/* resume point for the next dump call */
	return msg->len;
}
0801
0802 static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
0803 struct sk_buff *msg, u64 features,
0804 const struct virtio_net_config *config)
0805 {
0806 u16 val_u16;
0807
0808 if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
0809 return 0;
0810
0811 val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
0812 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
0813 }
0814
/*
 * Fill the net-class config attributes (mac, status, mtu, negotiated
 * features, optional max vq pairs) from the device's config space.
 * Caller holds cf_lock (vdpa_dev_config_fill()), hence the _unlocked read.
 */
static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
	struct virtio_net_config config = {};
	u64 features;
	u16 val_u16;

	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
		    config.mac))
		return -EMSGSIZE;

	/* __virtio16_to_cpu(true, ...) treats the field as little-endian. */
	val_u16 = __virtio16_to_cpu(true, config.status);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
		return -EMSGSIZE;

	val_u16 = __virtio16_to_cpu(true, config.mtu);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
		return -EMSGSIZE;

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
}
0842
/*
 * Fill one VDPA_CMD_DEV_CONFIG_GET message for @vdev. Holds cf_lock for
 * read across the whole fill so the config snapshot is consistent. Only
 * the net device class is supported so far.
 */
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u32 device_id;
	void *hdr;
	int err;

	down_read(&vdev->cf_lock);
	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	up_read(&vdev->cf_lock);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	up_read(&vdev->cf_lock);
	return err;
}
0891
/*
 * Emit one vendor-stats record for vq @index: max vq pairs, negotiated
 * features, queue index, then the vendor-specific counters. Requires that
 * feature negotiation already completed (FEATURES_OK set), since the
 * config/stat layout depends on the negotiated features.
 * Caller holds cf_lock (vendor_stats_fill()).
 */
static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
			       struct genl_info *info, u32 index)
{
	struct virtio_net_config config = {};
	u64 features;
	u16 max_vqp;
	u8 status;
	int err;

	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
		return -EAGAIN;
	}
	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
		return -EMSGSIZE;

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
			      features, VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
		return -EMSGSIZE;

	err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
	if (err)
		return err;

	return 0;
}
0926
/*
 * Take cf_lock and fill one stats record; -EOPNOTSUPP when the device
 * does not implement get_vendor_vq_stats.
 */
static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
			     struct genl_info *info, u32 index)
{
	int err;

	down_read(&vdev->cf_lock);
	if (!vdev->config->get_vendor_vq_stats) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = vdpa_fill_stats_rec(vdev, msg, info, index);
out:
	up_read(&vdev->cf_lock);
	return err;
}
0943
/*
 * Build a VDPA_CMD_DEV_VSTATS_GET reply for vq @index of @vdev.
 * Only the net device class is supported, and the queue index must not
 * exceed the virtio-net control-MQ maximum.
 */
static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
				      struct sk_buff *msg,
				      struct genl_info *info, u32 index)
{
	u32 device_id;
	void *hdr;
	int err;
	u32 portid = info->snd_portid;
	u32 seq = info->snd_seq;
	u32 flags = 0;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_VSTATS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			NL_SET_ERR_MSG_MOD(info->extack, "queue index excceeds max value");
			err = -ERANGE;
			break;
		}

		err = vendor_stats_fill(vdev, msg, info, index);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	/* NOTE(review): the message is ended even when err != 0 here —
	 * presumably intentional so partial attributes stay parseable;
	 * confirm against userspace expectations. */
	genlmsg_end(msg, hdr);

	return err;

undo_msg:
	genlmsg_cancel(msg, hdr);
	return err;
}
0993
0994 static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
0995 {
0996 struct vdpa_device *vdev;
0997 struct sk_buff *msg;
0998 const char *devname;
0999 struct device *dev;
1000 int err;
1001
1002 if (!info->attrs[VDPA_ATTR_DEV_NAME])
1003 return -EINVAL;
1004 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
1005 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1006 if (!msg)
1007 return -ENOMEM;
1008
1009 down_read(&vdpa_dev_lock);
1010 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
1011 if (!dev) {
1012 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
1013 err = -ENODEV;
1014 goto dev_err;
1015 }
1016 vdev = container_of(dev, struct vdpa_device, dev);
1017 if (!vdev->mdev) {
1018 NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
1019 err = -EINVAL;
1020 goto mdev_err;
1021 }
1022 err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
1023 0, info->extack);
1024 if (!err)
1025 err = genlmsg_reply(msg, info);
1026
1027 mdev_err:
1028 put_device(dev);
1029 dev_err:
1030 up_read(&vdpa_dev_lock);
1031 if (err)
1032 nlmsg_free(msg);
1033 return err;
1034 }
1035
/*
 * bus_for_each_dev() callback for VDPA_CMD_DEV_CONFIG_GET dump: one config
 * record per mgmtdev-created device, skipping entries before the resume
 * index.
 */
static int vdpa_dev_config_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
				   info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
				   info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}
1057
/* VDPA_CMD_DEV_CONFIG_GET dumpit: walk the bus, resuming from cb->args[0]. */
static int
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;	/* resume point for the next dump call */
	return msg->len;
}
1074
/*
 * VDPA_CMD_DEV_VSTATS_GET doit: reply with vendor statistics for one
 * virtqueue of a managed device. Both the device name and the queue index
 * attributes are mandatory.
 */
static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
					  struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	u32 index;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
		return -EINVAL;

	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
	if (err)
		goto mdev_err;

	/* genlmsg_reply() consumes @msg even on failure, so no free below. */
	err = genlmsg_reply(msg, info);

	put_device(dev);
	up_read(&vdpa_dev_lock);

	return err;

mdev_err:
	put_device(dev);
dev_err:
	nlmsg_free(msg);
	up_read(&vdpa_dev_lock);
	return err;
}
1128
/* Attribute validation policy for the vdpa generic-netlink family. */
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
	/* virtio spec 1.1 section 5.1.4.1 says MTU should be at least 68 */
	[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
};
1137
/* Command table for the vdpa netlink family; device-mutating commands
 * additionally require GENL_ADMIN_PERM. */
static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_CONFIG_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_config_get_doit,
		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_VSTATS_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_stats_get_doit,
		.flags = GENL_ADMIN_PERM,
	},
};
1176
/* Generic-netlink family definition; registered from vdpa_init(). */
static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,	/* not network-namespace aware */
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
};
1187
/* Module init: register the vdpa bus, then the netlink family; unwind the
 * bus registration if the family fails to register. */
static int vdpa_init(void)
{
	int err;

	err = bus_register(&vdpa_bus);
	if (err)
		return err;
	err = genl_register_family(&vdpa_nl_family);
	if (err)
		goto err;
	return 0;

err:
	bus_unregister(&vdpa_bus);
	return err;
}
1204
/* Module exit: tear down in reverse registration order, then release any
 * remaining index ids. */
static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
1211 core_initcall(vdpa_init);
1212 module_exit(vdpa_exit);
1213
1214 MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
1215 MODULE_LICENSE("GPL v2");