// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "common.h"

#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
#define VIRTIO_SCMI_MAX_MSG_SIZE	128
#define VIRTIO_SCMI_MAX_PDU_SIZE \
	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG		2
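
/*
 * A TX message occupies DESCRIPTORS_PER_TX_MSG virtqueue descriptors.
 * Illustrative sketch (added commentary, not part of the original sources)
 * of how virtio_send_message() below consumes them, one out-buffer for the
 * request and one in-buffer for the response:
 *
 *	struct scatterlist sg_out, sg_in;
 *	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
 *
 *	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
 *	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
 *	virtqueue_add_sgs(vq, sgs, 1, 1, msg, GFP_ATOMIC);
 */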

/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg descriptors
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	/* lock to protect access to the free list. */
	spinlock_t free_lock;
	struct list_head free_list;
	/* lock to protect access to the pending list. */
	spinlock_t pending_lock;
	struct list_head pending_cmds_list;
	struct work_struct deferred_tx_work;
	struct workqueue_struct *deferred_tx_wq;
	bool is_rx;
	unsigned int max_msg;
	/*
	 * Lock to protect access to all members except users, free_list and
	 * pending_cmds_list.
	 */
	spinlock_t lock;
	struct completion *shutdown_done;
	refcount_t users;
};

enum poll_states {
	VIO_MSG_NOT_POLLED,
	VIO_MSG_POLL_TIMEOUT,
	VIO_MSG_POLLING,
	VIO_MSG_POLL_DONE,
};
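
/*
 * Poll state transitions, as driven by the code below (added commentary, not
 * part of the original sources):
 *
 *	VIO_MSG_NOT_POLLED:	set in scmi_virtio_get_free_msg()
 *	VIO_MSG_POLLING:	set in virtio_send_message() when the xfer
 *				requested poll_completion
 *	VIO_MSG_POLL_TIMEOUT:	set in virtio_mark_txdone() when a polled
 *				transfer ends with -ETIMEDOUT
 *	VIO_MSG_POLL_DONE:	set in virtio_poll_done() once the polled
 *				reply has been dequeued
 */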

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message users and avoid premature
 *	   releasing (and unmapping) of this message data buffers.
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
	unsigned int poll_idx;
	enum poll_states poll_status;
	/* Lock to protect access to poll_status */
	spinlock_t poll_lock;
	refcount_t users;
};

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly setting channel not available any more */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
	if (refcount_dec_and_test(&vioch->users)) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
}

static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
	void *deferred_wq = NULL;

	/*
	 * Prepare to wait for the last release if not already released
	 * or in progress.
	 */
	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	virtio_break_device(vioch->vqueue->vdev);
	if (!vioch->is_rx && vioch->deferred_tx_wq) {
		deferred_wq = vioch->deferred_tx_wq;
		/* Cannot be kicked anymore after this... */
		vioch->deferred_tx_wq = NULL;
	}
	spin_unlock_irqrestore(&vioch->lock, flags);

	if (deferred_wq)
		destroy_workqueue(deferred_wq);

	scmi_vio_channel_release(vioch);

	/* Let any possibly concurrent RX path release the channel */
	wait_for_completion(vioch->shutdown_done);
}
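
/*
 * Illustrative shutdown handshake (added commentary, not part of the original
 * sources): cleanup_sync() drops the initial reference taken by
 * scmi_vio_channel_ready() and then blocks until the very last user is gone:
 *
 *	CPU A (chan_free path)			CPU B (RX/TX path)
 *	----------------------			------------------
 *	shutdown_done = &completion;
 *	virtio_break_device();			scmi_vio_channel_acquire();
 *	scmi_vio_channel_release();		...processing...
 *	wait_for_completion();			scmi_vio_channel_release();
 *						  // users hits 0 -> complete()
 */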

static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Still no users, no need to acquire poll_lock */
	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);

	return msg;
}

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
	return refcount_inc_not_zero(&msg->users);
}

/* Release a message: on the last put return it to the channel free list */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;
	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

/*
 * Complete the circulation of a processed message: feed it back to the RX
 * virtqueue on RX channels, or return it to the free list on TX channels.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
}

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
	bool cb_enabled = true;

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	for (;;) {
		if (!scmi_vio_channel_acquire(vioch))
			return;

		spin_lock_irqsave(&vioch->lock, flags);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
		}

		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			if (virtqueue_enable_cb(vqueue)) {
				spin_unlock_irqrestore(&vioch->lock, flags);
				scmi_vio_channel_release(vioch);
				return;
			}
			cb_enabled = true;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);

		if (msg) {
			msg->rx_len = length;
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release the channel between loop iterations so that, if
		 * this device is being shut down or removed while looping,
		 * the cleanup path can fully release it and the acquire at
		 * the top of the next iteration will bail out.
		 */
		scmi_vio_channel_release(vioch);
	}
}
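
/*
 * A sketch of the canonical virtio completion-callback pattern that the loop
 * above follows (added here for clarity, not part of the original sources):
 *
 *	for (;;) {
 *		virtqueue_disable_cb(vq);	// suppress notifications
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			process(buf);		// drain used buffers
 *		if (virtqueue_enable_cb(vq))
 *			break;	// re-armed with no new buffers: all done
 *		// else a buffer raced in while re-arming: loop and drain it
 *	}
 */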

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg, *tmp;

	vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

	if (!scmi_vio_channel_acquire(vioch))
		return;

	/*
	 * Process pre-fetched messages: these could be non-polled messages or
	 * late timed-out replies to polled messages dequeued by chance while
	 * polling for some other messages: this worker is in charge to process
	 * the valid non-expired messages and anyway finally free all of them.
	 */
	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Scan the list of possibly pre-fetched messages during polling */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);

		/*
		 * Channel is acquired here (cannot vanish) and this message
		 * is no more processed elsewhere so no poll_lock needed.
		 */
		if (msg->poll_status == VIO_MSG_NOT_POLLED)
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

		/* Free the processed message once done */
		scmi_vio_msg_release(vioch, msg);
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);

	/* Process possibly still pending messages */
	scmi_vio_complete_cb(vioch->vqueue);

	scmi_vio_channel_release(vioch);
}

static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };

static vq_callback_t *scmi_vio_complete_callbacks[] = {
	scmi_vio_complete_cb,
	scmi_vio_complete_cb
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

static int virtio_link_supplier(struct device *dev)
{
	if (!scmi_vdev) {
		dev_notice(dev,
			   "Deferring probe after not finding a bound scmi-virtio device\n");
		return -EPROBE_DEFER;
	}

	if (!device_link_add(dev, &scmi_vdev->dev,
			     DL_FLAG_AUTOREMOVE_CONSUMER)) {
		dev_err(dev, "Adding link to supplier virtio device failed\n");
		return -ECANCELED;
	}

	return 0;
}

static bool virtio_chan_available(struct device *dev, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	if (WARN_ON_ONCE(!scmi_vdev))
		return false;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		if (scmi_vio_have_vq_rx(scmi_vdev))
			vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	return vioch && !vioch->cinfo;
}

static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	struct scmi_vio_channel *vioch;
	int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	int i;

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* Setup a deferred worker for polling. */
	if (tx && !vioch->deferred_tx_wq) {
		vioch->deferred_tx_wq =
			alloc_workqueue(dev_name(&scmi_vdev->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;

		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		if (tx) {
			msg->request = devm_kzalloc(cinfo->dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
			spin_lock_init(&msg->poll_lock);
			refcount_set(&msg->users, 1);
		}

		msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;

		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_ready(vioch, cinfo);

	return 0;
}

static int virtio_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	scmi_vio_channel_cleanup_sync(vioch);

	scmi_free_channel(cinfo, data, id);

	return 0;
}

static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out;
	struct scatterlist sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	msg_tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	/*
	 * If polling was requested setup polling related data: take an
	 * additional reference on the message, since it could otherwise be
	 * freed concurrently by the IRQ path once the reply is delivered,
	 * and bind it to the xfer so that the polling path can find it.
	 */
	if (xfer->hdr.poll_completion) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		/* Still no users, no need to acquire poll_lock */
		msg->poll_status = VIO_MSG_POLLING;
		scmi_vio_msg_acquire(msg);
		/* Ensure initialized msg is visibly bound to xfer */
		smp_store_mb(xfer->priv, msg);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* Ensure order between xfer->priv clear and vq feeding */
		smp_store_mb(xfer->priv, NULL);
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		scmi_vio_msg_release(vioch, msg);
	}

	scmi_vio_channel_release(vioch);

	return rc;
}
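
/*
 * Reference-count ledger for a polled send (added commentary, not part of
 * the original sources):
 *
 *	scmi_virtio_get_free_msg()	users = 1
 *	scmi_vio_msg_acquire()		users = 2 (polling reference)
 *	virtio_mark_txdone()		drops one reference
 *	IRQ/poll/deferred-worker path	drops the other once the reply is
 *					dequeued (or deemed timed out),
 *					returning msg to the free list
 */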

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_response(msg->input, msg->rx_len, xfer);
}

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
				      size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
}

/**
 * virtio_mark_txdone  - Mark transmission done
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages once, finally, a late
 * reply is received and discarded (if ever).
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * timeout expiration: forcibly freeing and reusing them before they had been
 * returned explicitly by the SCMI platform could lead to subtle bugs due to
 * message corruption.
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/* Ensure msg is unbound from xfer anyway at this point */
	smp_store_mb(xfer->priv, NULL);

	/*
	 * Nothing further to do for non-polled messages, which are released
	 * on the RX path; for polled ones drop the reference taken at send
	 * time and bail out if it was the last one.
	 */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free timedout polled messages */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);

	scmi_vio_channel_release(vioch);
}

/**
 * virtio_poll_done  - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being poll for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence it can happen to dequeue something different from the
 * buffer we were poll-waiting for: if that is the case such early fetched
 * buffers are then added to the @pending_cmds_list for later processing by a
 * dedicated deferred worker.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we found the one we were polling on, or
 * we have seemingly emptied the virtqueue; if some buffers are still pending
 * on the list we accordingly kick the deferred worker to process them, so as
 * to avoid looping indefinitely in this busy-waiting helper.
 *
 * Note that, since we do NOT have a per-message suppress-notification
 * mechanism, the message we are polling for could alternatively be delivered
 * via the usual IRQ callback on another core which happened to have IRQs
 * enabled: in such a case it will be handled as such by scmi_rx_callback()
 * and the polling loop in the SCMI core TX path will be transparently
 * terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
	bool pending, found = false;
	unsigned int length, any_prefetched = 0;
	unsigned long flags;
	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!msg)
		return true;

	/*
	 * Processed already by other polling loop on another CPU ?
	 *
	 * Note that this message is acquired on the poll path so cannot
	 * vanish while inside this loop iteration even if concurrently
	 * processed on the IRQ path.
	 *
	 * Avoid to acquire poll_lock since poll_status can be changed in a
	 * relevant manner only later in this same thread of execution: any
	 * other possible concurrent change made on the IRQ path or by other
	 * polling loops has no meaningful impact on this loop iteration, so
	 * it is harmless to allow this race and avoid spinlocking with IRQs
	 * off in this initial part of the polling loop.
	 */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;

	if (!scmi_vio_channel_acquire(vioch))
		return true;

	/* Has cmdq index moved at all ? */
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	if (!pending) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/*
	 * Process all new messages till the polled-for message is found OR
	 * the vqueue is empty.
	 */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		bool next_msg_done = false;

		/*
		 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
		 * that it can be properly freed even on timeout in
		 * virtio_mark_txdone().
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			next_msg_done = true;
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is the message we were polling for ? */
		if (next_msg == msg) {
			found = true;
			break;
		} else if (next_msg_done) {
			/* Skip the rest if this was another polled msg */
			continue;
		}

		/*
		 * Enqueue for later processing any non-polled message and any
		 * timed-out polled one that we happen to have dequeued.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}

	/*
	 * When the polling loop has successfully terminated, anything queued
	 * in the meantime will be served by the deferred worker OR by the
	 * usual IRQ callback OR by other polling loops.
	 *
	 * If we are still looking for the polled reply, the polling index has
	 * to be updated to the current vqueue last used index.
	 */
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return found;
}

static const struct scmi_transport_ops scmi_virtio_ops = {
	.link_supplier = virtio_link_supplier,
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
	.mark_txdone = virtio_mark_txdone,
	.poll_done = virtio_poll_done,
};

static int scmi_vio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct scmi_vio_channel *channels;
	bool have_vq_rx;
	int vq_cnt;
	int i;
	int ret;
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtIO device allowed */
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

	channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	if (have_vq_rx)
		channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

	ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
			      scmi_vio_vqueue_names, NULL);
	if (ret) {
		dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
		return ret;
	}

	for (i = 0; i < vq_cnt; i++) {
		unsigned int sz;

		spin_lock_init(&channels[i].lock);
		spin_lock_init(&channels[i].free_lock);
		INIT_LIST_HEAD(&channels[i].free_list);
		spin_lock_init(&channels[i].pending_lock);
		INIT_LIST_HEAD(&channels[i].pending_cmds_list);
		channels[i].vqueue = vqs[i];

		sz = virtqueue_get_vring_size(channels[i].vqueue);
		/* Tx messages need multiple descriptors. */
		if (!channels[i].is_rx)
			sz /= DESCRIPTORS_PER_TX_MSG;

		if (sz > MSG_TOKEN_MAX) {
			dev_info(dev,
				 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
				 channels[i].is_rx ? "rx" : "tx",
				 sz, MSG_TOKEN_MAX);
			sz = MSG_TOKEN_MAX;
		}
		channels[i].max_msg = sz;
	}

	vdev->priv = channels;

	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	return 0;
}
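
/*
 * Worked sizing example (added for clarity, not part of the original
 * sources): with a TX vring of 256 descriptors, each command consuming
 * DESCRIPTORS_PER_TX_MSG == 2 of them, the channel supports
 * 256 / 2 = 128 concurrently pending messages; had the quotient exceeded
 * MSG_TOKEN_MAX it would have been capped to that limit, the ceiling
 * imposed by the SCMI message token space.
 */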

static void scmi_vio_remove(struct virtio_device *vdev)
{
	/*
	 * Once we get here, virtio_chan_free() will have already been called
	 * by the SCMI core for any existing channel and, as a consequence,
	 * all the virtio channels will have been already marked NOT ready,
	 * causing any outstanding message on any vqueue to be ignored by
	 * complete_cb: now we can just stop processing buffers and destroy
	 * the vqueues.
	 */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}
#endif
	return 0;
}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

static int __init virtio_scmi_init(void)
{
	return register_virtio_driver(&virtio_scmi_driver);
}

static void virtio_scmi_exit(void)
{
	unregister_virtio_driver(&virtio_scmi_driver);
}

const struct scmi_desc scmi_virtio_desc = {
	.transport_init = virtio_scmi_init,
	.transport_exit = virtio_scmi_exit,
	.ops = &scmi_virtio_ops,
	/* for non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};