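// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and Application Processors(AP). SCP offers control and management of
 * core/cluster power states, power domain DVFS, clocks, sensors and more,
 * exposed here through the various registered protocols.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */
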
0017 #include <linux/bitmap.h>
0018 #include <linux/device.h>
0019 #include <linux/export.h>
0020 #include <linux/idr.h>
0021 #include <linux/io.h>
0022 #include <linux/io-64-nonatomic-hi-lo.h>
0023 #include <linux/kernel.h>
0024 #include <linux/ktime.h>
0025 #include <linux/hashtable.h>
0026 #include <linux/list.h>
0027 #include <linux/module.h>
0028 #include <linux/of_address.h>
0029 #include <linux/of_device.h>
0030 #include <linux/processor.h>
0031 #include <linux/refcount.h>
0032 #include <linux/slab.h>
0033
0034 #include "common.h"
0035 #include "notify.h"
0036
0037 #define CREATE_TRACE_POINTS
0038 #include <trace/events/scmi.h>
0039
0040 enum scmi_error_codes {
0041 SCMI_SUCCESS = 0,
0042 SCMI_ERR_SUPPORT = -1,
0043 SCMI_ERR_PARAMS = -2,
0044 SCMI_ERR_ACCESS = -3,
0045 SCMI_ERR_ENTRY = -4,
0046 SCMI_ERR_RANGE = -5,
0047 SCMI_ERR_BUSY = -6,
0048 SCMI_ERR_COMMS = -7,
0049 SCMI_ERR_GENERIC = -8,
0050 SCMI_ERR_HARDWARE = -9,
0051 SCMI_ERR_PROTOCOL = -10,
0052 };
0053
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static DEFINE_IDR(scmi_requested_devices);
/* Protect access to scmi_requested_devices */
static DEFINE_MUTEX(scmi_requested_devices_mtx);

/* Track globally whether an SCMI SystemPower device has been created */
static bool scmi_syspower_registered;
/* Protect access to scmi_syspower_registered */
static DEFINE_MUTEX(scmi_syspower_mtx);

/**
 * struct scmi_requested_dev - Structure to track a requested SCMI device
 * @id_table: Device ID table of the requested device
 * @node: List node for the per-protocol list of requested devices
 */
struct scmi_requested_dev {
	const struct scmi_device_id *id_table;
	struct list_head node;
};
0073
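/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list for available to use xfers. It is initialized with
 *		a number of xfers equal to the maximum allowed in-flight
 *		messages.
 * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the
 *		   currently in-flight messages.
 */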
0088 struct scmi_xfers_info {
0089 unsigned long *xfer_alloc_table;
0090 spinlock_t xfer_lock;
0091 int max_msg;
0092 struct hlist_head free_xfers;
0093 DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
0094 };
0095
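/**
 * struct scmi_protocol_instance  - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres group.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which is defined by DT and implemented by the SCMI server fw.
 */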
0109 struct scmi_protocol_instance {
0110 const struct scmi_handle *handle;
0111 const struct scmi_protocol *proto;
0112 void *gid;
0113 refcount_t users;
0114 void *priv;
0115 struct scmi_protocol_handle ph;
0116 };
0117
0118 #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
0119
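/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR tree of initialized protocol instance descriptors
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, as advertised by the
 *		   base protocol
 * @active_protocols: IDR tree of device nodes for protocols actually
 *		      defined in the DT and confirmed as implemented by fw.
 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
 *		      in microseconds, for atomic operations: only synchronous
 *		      commands reported by the platform to complete within this
 *		      latency should be considered for atomic mode operation.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 */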
0151 struct scmi_info {
0152 struct device *dev;
0153 const struct scmi_desc *desc;
0154 struct scmi_revision_info version;
0155 struct scmi_handle handle;
0156 struct scmi_xfers_info tx_minfo;
0157 struct scmi_xfers_info rx_minfo;
0158 struct idr tx_idr;
0159 struct idr rx_idr;
0160 struct idr protocols;
0161
0162 struct mutex protocols_mtx;
0163 u8 *protocols_imp;
0164 struct idr active_protocols;
0165 unsigned int atomic_threshold;
0166 void *notify_priv;
0167 struct list_head node;
0168 int users;
0169 };
0170
0171 #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
0172
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};
0187
0188 static inline int scmi_to_linux_errno(int errno)
0189 {
0190 int err_idx = -errno;
0191
0192 if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
0193 return scmi_linux_errmap[err_idx];
0194 return -EIO;
0195 }
0196
0197 void scmi_notification_instance_data_set(const struct scmi_handle *handle,
0198 void *priv)
0199 {
0200 struct scmi_info *info = handle_to_scmi_info(handle);
0201
	info->notify_priv = priv;
	/* Ensure updated protocol private data are visible */
	smp_wmb();
0205 }
0206
0207 void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
0208 {
0209 struct scmi_info *info = handle_to_scmi_info(handle);
0210
	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
0214 }
0215
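/**
 * scmi_xfer_token_set  - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our requests in-order, the
 * candidate slot derived from the transfer_id may already be taken: in that
 * case the search continues from the next zero bit in @xfer_alloc_table,
 * wrapping around to the start of the bitmap if needed.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */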
0275 static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
0276 struct scmi_xfer *xfer)
0277 {
0278 unsigned long xfer_id, next_token;
0279
	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
0289
0290
0291 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
0292 MSG_TOKEN_MAX, next_token);
0293 if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at the start of xfer_alloc_table,
		 * so try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
0301
0302
0303
0304
0305
0306 if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
0307 return -ENOMEM;
0308 }
0309
0310
0311 if (xfer_id != next_token)
0312 atomic_add((int)(xfer_id - next_token), &transfer_last_id);
0313
0314
0315 set_bit(xfer_id, minfo->xfer_alloc_table);
0316 xfer->hdr.seq = (u16)xfer_id;
0317
0318 return 0;
0319 }
0320
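/**
 * scmi_xfer_token_clear  - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */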
0327 static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
0328 struct scmi_xfer *xfer)
0329 {
0330 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
0331 }
0332
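/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @set_pending: If true a monotonic token is picked and the xfer is added to
 *		 the pending hash table.
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and, if
 * required, sets a monotonically increasing token and stores the inflight
 * xfer into the @pending_xfers hashtable for later retrieval.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
 *	    @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else pointer error.
 */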
0355 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
0356 struct scmi_xfers_info *minfo,
0357 bool set_pending)
0358 {
0359 int ret;
0360 unsigned long flags;
0361 struct scmi_xfer *xfer;
0362
0363 spin_lock_irqsave(&minfo->xfer_lock, flags);
0364 if (hlist_empty(&minfo->free_xfers)) {
0365 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
0366 return ERR_PTR(-ENOMEM);
0367 }
0368
0369
0370 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
0371 hlist_del_init(&xfer->node);
0372
0373
0374
0375
0376
0377 xfer->transfer_id = atomic_inc_return(&transfer_last_id);
0378
0379 if (set_pending) {
0380
0381 ret = scmi_xfer_token_set(minfo, xfer);
0382 if (!ret) {
0383 hash_add(minfo->pending_xfers, &xfer->node,
0384 xfer->hdr.seq);
0385 xfer->pending = true;
0386 } else {
0387 dev_err(handle->dev,
0388 "Failed to get monotonic token %d\n", ret);
0389 hlist_add_head(&xfer->node, &minfo->free_xfers);
0390 xfer = ERR_PTR(ret);
0391 }
0392 }
0393
0394 if (!IS_ERR(xfer)) {
0395 refcount_set(&xfer->users, 1);
0396 atomic_set(&xfer->busy, SCMI_XFER_FREE);
0397 }
0398 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
0399
0400 return xfer;
0401 }
0402
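/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */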
0414 static void
0415 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
0416 {
0417 unsigned long flags;
0418
0419 spin_lock_irqsave(&minfo->xfer_lock, flags);
0420 if (refcount_dec_and_test(&xfer->users)) {
0421 if (xfer->pending) {
0422 scmi_xfer_token_clear(minfo, xfer);
0423 hash_del(&xfer->node);
0424 xfer->pending = false;
0425 }
0426 hlist_add_head(&xfer->node, &minfo->free_xfers);
0427 }
0428 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
0429 }
0430
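/**
 * scmi_xfer_lookup_unlocked  -  Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */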
0443 static struct scmi_xfer *
0444 scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
0445 {
0446 struct scmi_xfer *xfer = NULL;
0447
0448 if (test_bit(xfer_id, minfo->xfer_alloc_table))
0449 xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
0450
0451 return xfer ?: ERR_PTR(-EINVAL);
0452 }
0453
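/**
 * scmi_msg_response_validate  - Validate message type against state of related
 * xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is feasible for @xfer viewed from the
 * current state of @xfer.
 *
 * Return: 0 on Success, error otherwise
 */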
0473 static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
0474 u8 msg_type,
0475 struct scmi_xfer *xfer)
0476 {
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
0484 dev_err(cinfo->dev,
0485 "Delayed Response for %d not expected! Buggy F/W ?\n",
0486 xfer->hdr.seq);
0487 return -EINVAL;
0488 }
0489
0490 switch (xfer->state) {
0491 case SCMI_XFER_SENT_OK:
0492 if (msg_type == MSG_TYPE_DELAYED_RESP) {
0493
0494
0495
0496
0497 xfer->hdr.status = SCMI_SUCCESS;
0498 xfer->state = SCMI_XFER_RESP_OK;
0499 complete(&xfer->done);
0500 dev_warn(cinfo->dev,
0501 "Received valid OoO Delayed Response for %d\n",
0502 xfer->hdr.seq);
0503 }
0504 break;
0505 case SCMI_XFER_RESP_OK:
0506 if (msg_type != MSG_TYPE_DELAYED_RESP)
0507 return -EINVAL;
0508 break;
0509 case SCMI_XFER_DRESP_OK:
0510
0511 return -EINVAL;
0512 }
0513
0514 return 0;
0515 }
0516
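/**
 * scmi_xfer_state_update  - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully
 * validated by @scmi_msg_response_validate(), so here we just update the
 * state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */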
0529 static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
0530 {
0531 xfer->hdr.type = msg_type;
0532
0533
0534 if (xfer->hdr.type == MSG_TYPE_COMMAND)
0535 xfer->state = SCMI_XFER_RESP_OK;
0536 else
0537 xfer->state = SCMI_XFER_DRESP_OK;
0538 }
0539
0540 static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
0541 {
0542 int ret;
0543
0544 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
0545
0546 return ret == SCMI_XFER_FREE;
0547 }
0548
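/**
 * scmi_xfer_command_acquire  -  Helper to lookup and acquire an xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive ownership of
 * this xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */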
0561 static inline struct scmi_xfer *
0562 scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
0563 {
0564 int ret;
0565 unsigned long flags;
0566 struct scmi_xfer *xfer;
0567 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
0568 struct scmi_xfers_info *minfo = &info->tx_minfo;
0569 u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
0570 u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
0571
0572
0573 spin_lock_irqsave(&minfo->xfer_lock, flags);
0574 xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
0575 if (IS_ERR(xfer)) {
0576 dev_err(cinfo->dev,
0577 "Message for %d type %d is not expected!\n",
0578 xfer_id, msg_type);
0579 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
0580 return xfer;
0581 }
0582 refcount_inc(&xfer->users);
0583 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
0584
0585 spin_lock_irqsave(&xfer->lock, flags);
0586 ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruous state to
	 * receive the message, acquire exclusive access to it by setting the
	 * busy flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
0595 spin_until_cond(scmi_xfer_acquired(xfer));
0596 scmi_xfer_state_update(xfer, msg_type);
0597 }
0598 spin_unlock_irqrestore(&xfer->lock, flags);
0599
0600 if (ret) {
0601 dev_err(cinfo->dev,
0602 "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
0603 msg_type, xfer_id, msg_hdr, xfer->state);
0604
0605 __scmi_xfer_put(minfo, xfer);
0606 xfer = ERR_PTR(-EINVAL);
0607 }
0608
0609 return xfer;
0610 }
0611
0612 static inline void scmi_xfer_command_release(struct scmi_info *info,
0613 struct scmi_xfer *xfer)
0614 {
0615 atomic_set(&xfer->busy, SCMI_XFER_FREE);
0616 __scmi_xfer_put(&info->tx_minfo, xfer);
0617 }
0618
0619 static inline void scmi_clear_channel(struct scmi_info *info,
0620 struct scmi_chan_info *cinfo)
0621 {
0622 if (info->desc->ops->clear_channel)
0623 info->desc->ops->clear_channel(cinfo);
0624 }
0625
0626 static inline bool is_polling_required(struct scmi_chan_info *cinfo,
0627 struct scmi_info *info)
0628 {
0629 return cinfo->no_completion_irq || info->desc->force_polling;
0630 }
0631
0632 static inline bool is_transport_polling_capable(struct scmi_info *info)
0633 {
0634 return info->desc->ops->poll_done ||
0635 info->desc->sync_cmds_completed_on_ret;
0636 }
0637
0638 static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
0639 struct scmi_info *info)
0640 {
0641 return is_polling_required(cinfo, info) &&
0642 is_transport_polling_capable(info);
0643 }
0644
0645 static void scmi_handle_notification(struct scmi_chan_info *cinfo,
0646 u32 msg_hdr, void *priv)
0647 {
0648 struct scmi_xfer *xfer;
0649 struct device *dev = cinfo->dev;
0650 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
0651 struct scmi_xfers_info *minfo = &info->rx_minfo;
0652 ktime_t ts;
0653
0654 ts = ktime_get_boottime();
0655 xfer = scmi_xfer_get(cinfo->handle, minfo, false);
0656 if (IS_ERR(xfer)) {
0657 dev_err(dev, "failed to get free message slot (%ld)\n",
0658 PTR_ERR(xfer));
0659 scmi_clear_channel(info, cinfo);
0660 return;
0661 }
0662
0663 unpack_scmi_header(msg_hdr, &xfer->hdr);
0664 if (priv)
0665
0666 smp_store_mb(xfer->priv, priv);
0667 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
0668 xfer);
0669
0670 trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
0671 xfer->hdr.seq, xfer->hdr.status,
0672 xfer->rx.buf, xfer->rx.len);
0673
0674 scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
0675 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
0676
0677 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
0678 xfer->hdr.protocol_id, xfer->hdr.seq,
0679 MSG_TYPE_NOTIFICATION);
0680
0681 __scmi_xfer_put(minfo, xfer);
0682
0683 scmi_clear_channel(info, cinfo);
0684 }
0685
0686 static void scmi_handle_response(struct scmi_chan_info *cinfo,
0687 u32 msg_hdr, void *priv)
0688 {
0689 struct scmi_xfer *xfer;
0690 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
0691
0692 xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
0693 if (IS_ERR(xfer)) {
0694 if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
0695 scmi_clear_channel(info, cinfo);
0696 return;
0697 }
0698
0699
0700 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
0701 xfer->rx.len = info->desc->max_msg_size;
0702
0703 if (priv)
0704
0705 smp_store_mb(xfer->priv, priv);
0706 info->desc->ops->fetch_response(cinfo, xfer);
0707
0708 trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
0709 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
0710 "DLYD" : "RESP",
0711 xfer->hdr.seq, xfer->hdr.status,
0712 xfer->rx.buf, xfer->rx.len);
0713
0714 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
0715 xfer->hdr.protocol_id, xfer->hdr.seq,
0716 xfer->hdr.type);
0717
0718 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
0719 scmi_clear_channel(info, cinfo);
0720 complete(xfer->async_done);
0721 } else {
0722 complete(&xfer->done);
0723 }
0724
0725 scmi_xfer_command_release(info, xfer);
0726 }
0727
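/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */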
0741 void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
0742 {
0743 u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
0744
0745 switch (msg_type) {
0746 case MSG_TYPE_NOTIFICATION:
0747 scmi_handle_notification(cinfo, msg_hdr, priv);
0748 break;
0749 case MSG_TYPE_COMMAND:
0750 case MSG_TYPE_DELAYED_RESP:
0751 scmi_handle_response(cinfo, msg_hdr, priv);
0752 break;
0753 default:
0754 WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
0755 break;
0756 }
0757 }
0758
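/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */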
0765 static void xfer_put(const struct scmi_protocol_handle *ph,
0766 struct scmi_xfer *xfer)
0767 {
0768 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
0769 struct scmi_info *info = handle_to_scmi_info(pi->handle);
0770
0771 __scmi_xfer_put(&info->tx_minfo, xfer);
0772 }
0773
0774 static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
0775 struct scmi_xfer *xfer, ktime_t stop)
0776 {
0777 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
0778
0779
0780
0781
0782
0783 return info->desc->ops->poll_done(cinfo, xfer) ||
0784 try_wait_for_completion(&xfer->done) ||
0785 ktime_after(ktime_get(), stop);
0786 }
0787
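/**
 * scmi_wait_for_message_response  - An helper to group all the possible ways
 * of waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses the waiting strategy (sleep-waiting vs busy-waiting) depending on
 * the transport capabilities and on the poll_completion flag of the command
 * being waited for.
 *
 * Return: 0 on Success, error otherwise.
 */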
0800 static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
0801 struct scmi_xfer *xfer)
0802 {
0803 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
0804 struct device *dev = info->dev;
0805 int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
0806
0807 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
0808 xfer->hdr.protocol_id, xfer->hdr.seq,
0809 timeout_ms,
0810 xfer->hdr.poll_completion);
0811
0812 if (xfer->hdr.poll_completion) {
0813
0814
0815
0816
0817 if (!info->desc->sync_cmds_completed_on_ret) {
0818
0819
0820
0821
0822 ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
0823
0824 spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
0825 xfer, stop));
0826 if (ktime_after(ktime_get(), stop)) {
0827 dev_err(dev,
0828 "timed out in resp(caller: %pS) - polling\n",
0829 (void *)_RET_IP_);
0830 ret = -ETIMEDOUT;
0831 }
0832 }
0833
0834 if (!ret) {
0835 unsigned long flags;
0836
0837
0838
0839
0840
0841 spin_lock_irqsave(&xfer->lock, flags);
0842 if (xfer->state == SCMI_XFER_SENT_OK) {
0843 info->desc->ops->fetch_response(cinfo, xfer);
0844 xfer->state = SCMI_XFER_RESP_OK;
0845 }
0846 spin_unlock_irqrestore(&xfer->lock, flags);
0847
0848
0849 trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
0850 "RESP",
0851 xfer->hdr.seq, xfer->hdr.status,
0852 xfer->rx.buf, xfer->rx.len);
0853 }
0854 } else {
0855
0856 if (!wait_for_completion_timeout(&xfer->done,
0857 msecs_to_jiffies(timeout_ms))) {
0858 dev_err(dev, "timed out in resp(caller: %pS)\n",
0859 (void *)_RET_IP_);
0860 ret = -ETIMEDOUT;
0861 }
0862 }
0863
0864 return ret;
0865 }
0866
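/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */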
0877 static int do_xfer(const struct scmi_protocol_handle *ph,
0878 struct scmi_xfer *xfer)
0879 {
0880 int ret;
0881 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
0882 struct scmi_info *info = handle_to_scmi_info(pi->handle);
0883 struct device *dev = info->dev;
0884 struct scmi_chan_info *cinfo;
0885
0886
0887 if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
0888 dev_warn_once(dev,
0889 "Polling mode is not supported by transport.\n");
0890 return -EINVAL;
0891 }
0892
0893 cinfo = idr_find(&info->tx_idr, pi->proto->id);
0894 if (unlikely(!cinfo))
0895 return -EINVAL;
0896
0897
0898 if (is_polling_enabled(cinfo, info))
0899 xfer->hdr.poll_completion = true;
0900
0901
0902
0903
0904
0905
0906 xfer->hdr.protocol_id = pi->proto->id;
0907 reinit_completion(&xfer->done);
0908
0909 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
0910 xfer->hdr.protocol_id, xfer->hdr.seq,
0911 xfer->hdr.poll_completion);
0912
0913 xfer->state = SCMI_XFER_SENT_OK;
0914
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();
0922
0923 ret = info->desc->ops->send_message(cinfo, xfer);
0924 if (ret < 0) {
0925 dev_dbg(dev, "Failed to send message %d\n", ret);
0926 return ret;
0927 }
0928
0929 trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
0930 xfer->hdr.seq, xfer->hdr.status,
0931 xfer->tx.buf, xfer->tx.len);
0932
0933 ret = scmi_wait_for_message_response(cinfo, xfer);
0934 if (!ret && xfer->hdr.status)
0935 ret = scmi_to_linux_errno(xfer->hdr.status);
0936
0937 if (info->desc->ops->mark_txdone)
0938 info->desc->ops->mark_txdone(cinfo, ret, xfer);
0939
0940 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
0941 xfer->hdr.protocol_id, xfer->hdr.seq, ret);
0942
0943 return ret;
0944 }
0945
0946 static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
0947 struct scmi_xfer *xfer)
0948 {
0949 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
0950 struct scmi_info *info = handle_to_scmi_info(pi->handle);
0951
0952 xfer->rx.len = info->desc->max_msg_size;
0953 }
0954
0955 #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
0956
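/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kind of requests.
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */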
0981 static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
0982 struct scmi_xfer *xfer)
0983 {
0984 int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
0985 DECLARE_COMPLETION_ONSTACK(async_response);
0986
0987 xfer->async_done = &async_response;
0988
0989
0990
0991
0992
0993
0994
0995 WARN_ON_ONCE(xfer->hdr.poll_completion);
0996
0997 ret = do_xfer(ph, xfer);
0998 if (!ret) {
0999 if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1000 dev_err(ph->dev,
1001 "timed out in delayed resp(caller: %pS)\n",
1002 (void *)_RET_IP_);
1003 ret = -ETIMEDOUT;
1004 } else if (xfer->hdr.status) {
1005 ret = scmi_to_linux_errno(xfer->hdr.status);
1006 }
1007 }
1008
1009 xfer->async_done = NULL;
1010 return ret;
1011 }
1012
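/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */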
1028 static int xfer_get_init(const struct scmi_protocol_handle *ph,
1029 u8 msg_id, size_t tx_size, size_t rx_size,
1030 struct scmi_xfer **p)
1031 {
1032 int ret;
1033 struct scmi_xfer *xfer;
1034 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1035 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1036 struct scmi_xfers_info *minfo = &info->tx_minfo;
1037 struct device *dev = info->dev;
1038
1039
1040 if (rx_size > info->desc->max_msg_size ||
1041 tx_size > info->desc->max_msg_size)
1042 return -ERANGE;
1043
1044 xfer = scmi_xfer_get(pi->handle, minfo, true);
1045 if (IS_ERR(xfer)) {
1046 ret = PTR_ERR(xfer);
1047 dev_err(dev, "failed to get free message slot(%d)\n", ret);
1048 return ret;
1049 }
1050
1051 xfer->tx.len = tx_size;
1052 xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1053 xfer->hdr.type = MSG_TYPE_COMMAND;
1054 xfer->hdr.id = msg_id;
1055 xfer->hdr.poll_completion = false;
1056
1057 *p = xfer;
1058
1059 return 0;
1060 }
1061
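/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */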
1072 static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
1073 {
1074 int ret;
1075 __le32 *rev_info;
1076 struct scmi_xfer *t;
1077
1078 ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
1079 if (ret)
1080 return ret;
1081
1082 ret = do_xfer(ph, t);
1083 if (!ret) {
1084 rev_info = t->rx.buf;
1085 *version = le32_to_cpu(*rev_info);
1086 }
1087
1088 xfer_put(ph, t);
1089 return ret;
1090 }
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100 static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
1101 void *priv)
1102 {
1103 struct scmi_protocol_instance *pi = ph_to_pi(ph);
1104
1105 pi->priv = priv;
1106
1107 return 0;
1108 }
1109
1110
1111
1112
1113
1114
1115
1116
1117 static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
1118 {
1119 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1120
1121 return pi->priv;
1122 }
1123
1124 static const struct scmi_xfer_ops xfer_ops = {
1125 .version_get = version_get,
1126 .xfer_get_init = xfer_get_init,
1127 .reset_rx_to_maxsz = reset_rx_to_maxsz,
1128 .do_xfer = do_xfer,
1129 .do_xfer_with_response = do_xfer_with_response,
1130 .xfer_put = xfer_put,
1131 };
1132
1133 struct scmi_msg_resp_domain_name_get {
1134 __le32 flags;
1135 u8 name[SCMI_MAX_STR_SIZE];
1136 };
1137
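/**
 * scmi_common_extended_name_get  - Common helper to get extended resources name
 * @ph: A protocol handle reference.
 * @cmd_id: The specific command ID to use.
 * @res_id: The specific resource ID to use.
 * @name: A pointer to the preallocated area where the retrieved name will be
 *	  stored as a NULL terminated string.
 * @len: The len in bytes of the @name char array.
 *
 * Return: 0 on Success
 */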
1149 static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
1150 u8 cmd_id, u32 res_id, char *name,
1151 size_t len)
1152 {
1153 int ret;
1154 struct scmi_xfer *t;
1155 struct scmi_msg_resp_domain_name_get *resp;
1156
1157 ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
1158 sizeof(*resp), &t);
1159 if (ret)
1160 goto out;
1161
1162 put_unaligned_le32(res_id, t->tx.buf);
1163 resp = t->rx.buf;
1164
1165 ret = ph->xops->do_xfer(ph, t);
1166 if (!ret)
1167 strscpy(name, resp->name, len);
1168
1169 ph->xops->xfer_put(ph, t);
1170 out:
1171 if (ret)
1172 dev_warn(ph->dev,
1173 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
1174 res_id, ret, name);
1175 return ret;
1176 }
1177
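/**
 * struct scmi_iterator  - Iterator descriptor
 * @msg: A reference to the message TX buffer; filled by @prepare_message with
 *	 a proper custom command payload for each multi-part command request.
 * @resp: A reference to the response RX buffer; used by @update_state and
 *	  @process_response to parse the multi-part replies.
 * @t: A reference to the underlying xfer initialized and used transparently by
 *     the iterator internal routines.
 * @ph: A reference to the associated protocol handle to be used.
 * @ops: A reference to the custom provided iterator operations.
 * @state: The current iterator state; used and updated by the iterator
 *	   internal routines.
 * @priv: A reference to optional private data as provided by the caller and
 *	  passed back to the various user-provided operations.
 */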
1193 struct scmi_iterator {
1194 void *msg;
1195 void *resp;
1196 struct scmi_xfer *t;
1197 const struct scmi_protocol_handle *ph;
1198 struct scmi_iterator_ops *ops;
1199 struct scmi_iterator_state state;
1200 void *priv;
1201 };
1202
1203 static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
1204 struct scmi_iterator_ops *ops,
1205 unsigned int max_resources, u8 msg_id,
1206 size_t tx_size, void *priv)
1207 {
1208 int ret;
1209 struct scmi_iterator *i;
1210
1211 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1212 if (!i)
1213 return ERR_PTR(-ENOMEM);
1214
1215 i->ph = ph;
1216 i->ops = ops;
1217 i->priv = priv;
1218
1219 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1220 if (ret) {
1221 devm_kfree(ph->dev, i);
1222 return ERR_PTR(ret);
1223 }
1224
1225 i->state.max_resources = max_resources;
1226 i->msg = i->t->tx.buf;
1227 i->resp = i->t->rx.buf;
1228
1229 return i;
1230 }
1231
1232 static int scmi_iterator_run(void *iter)
1233 {
1234 int ret = -EINVAL;
1235 struct scmi_iterator_ops *iops;
1236 const struct scmi_protocol_handle *ph;
1237 struct scmi_iterator_state *st;
1238 struct scmi_iterator *i = iter;
1239
1240 if (!i || !i->ops || !i->ph)
1241 return ret;
1242
1243 iops = i->ops;
1244 ph = i->ph;
1245 st = &i->state;
1246
1247 do {
1248 iops->prepare_message(i->msg, st->desc_index, i->priv);
1249 ret = ph->xops->do_xfer(ph, i->t);
1250 if (ret)
1251 break;
1252
1253 st->rx_len = i->t->rx.len;
1254 ret = iops->update_state(st, i->resp, i->priv);
1255 if (ret)
1256 break;
1257
1258 if (st->num_returned > st->max_resources - st->desc_index) {
1259 dev_err(ph->dev,
1260 "No. of resources can't exceed %d\n",
1261 st->max_resources);
1262 ret = -EINVAL;
1263 break;
1264 }
1265
1266 for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1267 st->loop_idx++) {
1268 ret = iops->process_response(ph, i->resp, st, i->priv);
1269 if (ret)
1270 goto out;
1271 }
1272
1273 st->desc_index += st->num_returned;
1274 ph->xops->reset_rx_to_maxsz(ph, i->t);
1275
1276
1277
1278
1279 } while (st->num_returned && st->num_remaining);
1280
1281 out:
1282
1283 ph->xops->xfer_put(ph, i->t);
1284 devm_kfree(ph->dev, i);
1285
1286 return ret;
1287 }
1288
1289 struct scmi_msg_get_fc_info {
1290 __le32 domain;
1291 __le32 message_id;
1292 };
1293
1294 struct scmi_msg_resp_desc_fc {
1295 __le32 attr;
1296 #define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
1297 #define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
1298 __le32 rate_limit;
1299 __le32 chan_addr_low;
1300 __le32 chan_addr_high;
1301 __le32 chan_size;
1302 __le32 db_addr_low;
1303 __le32 db_addr_high;
1304 __le32 db_set_lmask;
1305 __le32 db_set_hmask;
1306 __le32 db_preserve_lmask;
1307 __le32 db_preserve_hmask;
1308 };
1309
1310 static void
1311 scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1312 u8 describe_id, u32 message_id, u32 valid_size,
1313 u32 domain, void __iomem **p_addr,
1314 struct scmi_fc_db_info **p_db)
1315 {
1316 int ret;
1317 u32 flags;
1318 u64 phys_addr;
1319 u8 size;
1320 void __iomem *addr;
1321 struct scmi_xfer *t;
1322 struct scmi_fc_db_info *db = NULL;
1323 struct scmi_msg_get_fc_info *info;
1324 struct scmi_msg_resp_desc_fc *resp;
1325 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1326
1327 if (!p_addr) {
1328 ret = -EINVAL;
1329 goto err_out;
1330 }
1331
1332 ret = ph->xops->xfer_get_init(ph, describe_id,
1333 sizeof(*info), sizeof(*resp), &t);
1334 if (ret)
1335 goto err_out;
1336
1337 info = t->tx.buf;
1338 info->domain = cpu_to_le32(domain);
1339 info->message_id = cpu_to_le32(message_id);
1340
1341
1342
1343
1344
1345
1346 ret = ph->xops->do_xfer(ph, t);
1347 if (ret)
1348 goto err_xfer;
1349
1350 resp = t->rx.buf;
1351 flags = le32_to_cpu(resp->attr);
1352 size = le32_to_cpu(resp->chan_size);
1353 if (size != valid_size) {
1354 ret = -EINVAL;
1355 goto err_xfer;
1356 }
1357
1358 phys_addr = le32_to_cpu(resp->chan_addr_low);
1359 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1360 addr = devm_ioremap(ph->dev, phys_addr, size);
1361 if (!addr) {
1362 ret = -EADDRNOTAVAIL;
1363 goto err_xfer;
1364 }
1365
1366 *p_addr = addr;
1367
1368 if (p_db && SUPPORTS_DOORBELL(flags)) {
1369 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1370 if (!db) {
1371 ret = -ENOMEM;
1372 goto err_db;
1373 }
1374
1375 size = 1 << DOORBELL_REG_WIDTH(flags);
1376 phys_addr = le32_to_cpu(resp->db_addr_low);
1377 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1378 addr = devm_ioremap(ph->dev, phys_addr, size);
1379 if (!addr) {
1380 ret = -EADDRNOTAVAIL;
1381 goto err_db_mem;
1382 }
1383
1384 db->addr = addr;
1385 db->width = size;
1386 db->set = le32_to_cpu(resp->db_set_lmask);
1387 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1388 db->mask = le32_to_cpu(resp->db_preserve_lmask);
1389 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1390
1391 *p_db = db;
1392 }
1393
1394 ph->xops->xfer_put(ph, t);
1395
1396 dev_dbg(ph->dev,
1397 "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
1398 pi->proto->id, message_id, domain);
1399
1400 return;
1401
1402 err_db_mem:
1403 devm_kfree(ph->dev, db);
1404
1405 err_db:
1406 *p_addr = NULL;
1407
1408 err_xfer:
1409 ph->xops->xfer_put(ph, t);
1410
1411 err_out:
1412 dev_warn(ph->dev,
1413 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
1414 pi->proto->id, message_id, domain, ret);
1415 }
1416
1417 #define SCMI_PROTO_FC_RING_DB(w) \
1418 do { \
1419 u##w val = 0; \
1420 \
1421 if (db->mask) \
1422 val = ioread##w(db->addr) & db->mask; \
1423 iowrite##w((u##w)db->set | val, db->addr); \
1424 } while (0)
1425
1426 static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
1427 {
1428 if (!db || !db->addr)
1429 return;
1430
1431 if (db->width == 1)
1432 SCMI_PROTO_FC_RING_DB(8);
1433 else if (db->width == 2)
1434 SCMI_PROTO_FC_RING_DB(16);
1435 else if (db->width == 4)
1436 SCMI_PROTO_FC_RING_DB(32);
1437 else
1438 #ifdef CONFIG_64BIT
1439 SCMI_PROTO_FC_RING_DB(64);
1440 #else
1441 {
1442 u64 val = 0;
1443
1444 if (db->mask)
1445 val = ioread64_hi_lo(db->addr) & db->mask;
1446 iowrite64_hi_lo(db->set | val, db->addr);
1447 }
1448 #endif
1449 }
1450
1451 static const struct scmi_proto_helpers_ops helpers_ops = {
1452 .extended_name_get = scmi_common_extended_name_get,
1453 .iter_response_init = scmi_iterator_init,
1454 .iter_response_run = scmi_iterator_run,
1455 .fastchannel_init = scmi_common_fastchannel_init,
1456 .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
1457 };
1458
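/**
 * scmi_revision_area_get  - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 *	   instance underlying this protocol handle.
 */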
1470 struct scmi_revision_info *
1471 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
1472 {
1473 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1474
1475 return pi->handle->version;
1476 }
1477
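/**
 * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * descriptor, and initialize it; all the resources allocated by this new
 * instance are kept in a dedicated per-protocol devres group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *	   or ERR_PTR on failure.
 */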
1494 static struct scmi_protocol_instance *
1495 scmi_alloc_init_protocol_instance(struct scmi_info *info,
1496 const struct scmi_protocol *proto)
1497 {
1498 int ret = -ENOMEM;
1499 void *gid;
1500 struct scmi_protocol_instance *pi;
1501 const struct scmi_handle *handle = &info->handle;
1502
1503
1504 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
1505 if (!gid) {
1506 scmi_protocol_put(proto->id);
1507 goto out;
1508 }
1509
1510 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
1511 if (!pi)
1512 goto clean;
1513
1514 pi->gid = gid;
1515 pi->proto = proto;
1516 pi->handle = handle;
1517 pi->ph.dev = handle->dev;
1518 pi->ph.xops = &xfer_ops;
1519 pi->ph.hops = &helpers_ops;
1520 pi->ph.set_priv = scmi_set_protocol_priv;
1521 pi->ph.get_priv = scmi_get_protocol_priv;
1522 refcount_set(&pi->users, 1);
1523
1524 ret = pi->proto->instance_init(&pi->ph);
1525 if (ret)
1526 goto clean;
1527
1528 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
1529 GFP_KERNEL);
1530 if (ret != proto->id)
1531 goto clean;
1532
1533
1534
1535
1536
1537 if (pi->proto->events) {
1538 ret = scmi_register_protocol_events(handle, pi->proto->id,
1539 &pi->ph,
1540 pi->proto->events);
1541 if (ret)
1542 dev_warn(handle->dev,
1543 "Protocol:%X - Events Registration Failed - err:%d\n",
1544 pi->proto->id, ret);
1545 }
1546
1547 devres_close_group(handle->dev, pi->gid);
1548 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
1549
1550 return pi;
1551
1552 clean:
1553
1554 scmi_protocol_put(proto->id);
1555 devres_release_group(handle->dev, gid);
1556 out:
1557 return ERR_PTR(ret);
1558 }
1559
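/**
 * scmi_get_protocol_instance  - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 *	   NOT be found.
 */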
1573 static struct scmi_protocol_instance * __must_check
1574 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
1575 {
1576 struct scmi_protocol_instance *pi;
1577 struct scmi_info *info = handle_to_scmi_info(handle);
1578
1579 mutex_lock(&info->protocols_mtx);
1580 pi = idr_find(&info->protocols, protocol_id);
1581
1582 if (pi) {
1583 refcount_inc(&pi->users);
1584 } else {
1585 const struct scmi_protocol *proto;
1586
1587
1588 proto = scmi_protocol_get(protocol_id);
1589 if (proto)
1590 pi = scmi_alloc_init_protocol_instance(info, proto);
1591 else
1592 pi = ERR_PTR(-EPROBE_DEFER);
1593 }
1594 mutex_unlock(&info->protocols_mtx);
1595
1596 return pi;
1597 }
1598
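/**
 * scmi_protocol_acquire  - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */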
1609 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
1610 {
1611 return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
1612 }
1613
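/**
 * scmi_protocol_release  - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being released.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resources de-allocation once the last user has gone.
 */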
1622 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
1623 {
1624 struct scmi_info *info = handle_to_scmi_info(handle);
1625 struct scmi_protocol_instance *pi;
1626
1627 mutex_lock(&info->protocols_mtx);
1628 pi = idr_find(&info->protocols, protocol_id);
1629 if (WARN_ON(!pi))
1630 goto out;
1631
1632 if (refcount_dec_and_test(&pi->users)) {
1633 void *gid = pi->gid;
1634
1635 if (pi->proto->events)
1636 scmi_deregister_protocol_events(handle, protocol_id);
1637
1638 if (pi->proto->instance_deinit)
1639 pi->proto->instance_deinit(&pi->ph);
1640
1641 idr_remove(&info->protocols, protocol_id);
1642
1643 scmi_protocol_put(protocol_id);
1644
1645 devres_release_group(handle->dev, gid);
1646 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
1647 protocol_id);
1648 }
1649
1650 out:
1651 mutex_unlock(&info->protocols_mtx);
1652 }
1653
1654 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
1655 u8 *prot_imp)
1656 {
1657 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1658 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1659
1660 info->protocols_imp = prot_imp;
1661 }
1662
1663 static bool
1664 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
1665 {
1666 int i;
1667 struct scmi_info *info = handle_to_scmi_info(handle);
1668 struct scmi_revision_info *rev = handle->version;
1669
1670 if (!info->protocols_imp)
1671 return false;
1672
1673 for (i = 0; i < rev->num_protocols; i++)
1674 if (info->protocols_imp[i] == prot_id)
1675 return true;
1676 return false;
1677 }
1678
1679 struct scmi_protocol_devres {
1680 const struct scmi_handle *handle;
1681 u8 protocol_id;
1682 };
1683
1684 static void scmi_devm_release_protocol(struct device *dev, void *res)
1685 {
1686 struct scmi_protocol_devres *dres = res;
1687
1688 scmi_protocol_release(dres->handle, dres->protocol_id);
1689 }
1690
1691 static struct scmi_protocol_instance __must_check *
1692 scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
1693 {
1694 struct scmi_protocol_instance *pi;
1695 struct scmi_protocol_devres *dres;
1696
1697 dres = devres_alloc(scmi_devm_release_protocol,
1698 sizeof(*dres), GFP_KERNEL);
1699 if (!dres)
1700 return ERR_PTR(-ENOMEM);
1701
1702 pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
1703 if (IS_ERR(pi)) {
1704 devres_free(dres);
1705 return pi;
1706 }
1707
1708 dres->handle = sdev->handle;
1709 dres->protocol_id = protocol_id;
1710 devres_add(&sdev->dev, dres);
1711
1712 return pi;
1713 }
1714
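/**
 * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, eventually triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, the protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */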
1733 static const void __must_check *
1734 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
1735 struct scmi_protocol_handle **ph)
1736 {
1737 struct scmi_protocol_instance *pi;
1738
1739 if (!ph)
1740 return ERR_PTR(-EINVAL);
1741
1742 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
1743 if (IS_ERR(pi))
1744 return pi;
1745
1746 *ph = &pi->ph;
1747
1748 return pi->proto->ops;
1749 }
1750
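/**
 * scmi_devm_protocol_acquire  - Devres managed acquire protocol
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Get hold of a protocol accounting for its usage, possibly triggering its
 * initialization, but without getting access to its protocol specific
 * operations and handle.
 *
 * Return: 0 on Success
 */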
1767 static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
1768 u8 protocol_id)
1769 {
1770 struct scmi_protocol_instance *pi;
1771
1772 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
1773 if (IS_ERR(pi))
1774 return PTR_ERR(pi);
1775
1776 return 0;
1777 }
1778
1779 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
1780 {
1781 struct scmi_protocol_devres *dres = res;
1782
1783 if (WARN_ON(!dres || !data))
1784 return 0;
1785
1786 return dres->protocol_id == *((u8 *)data);
1787 }
1788
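/**
 * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being released.
 *
 * Explicitly release a protocol hold previously obtained calling the above
 * @scmi_devm_protocol_get.
 */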
1798 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
1799 {
1800 int ret;
1801
1802 ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
1803 scmi_devm_protocol_match, &protocol_id);
1804 WARN_ON(ret);
1805 }
1806
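/**
 * scmi_is_transport_atomic  - Method to check if underlying transport for an
 * SCMI instance is configured as atomic.
 *
 * @handle: A reference to the SCMI platform instance.
 * @atomic_threshold: An optional return value for the system wide currently
 *		      configured threshold for atomic operations.
 *
 * Return: True if transport is configured as atomic
 */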
1817 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
1818 unsigned int *atomic_threshold)
1819 {
1820 bool ret;
1821 struct scmi_info *info = handle_to_scmi_info(handle);
1822
1823 ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
1824 if (ret && atomic_threshold)
1825 *atomic_threshold = info->atomic_threshold;
1826
1827 return ret;
1828 }
1829
1830 static inline
1831 struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
1832 {
1833 info->users++;
1834 return &info->handle;
1835 }
1836
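/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */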
1848 struct scmi_handle *scmi_handle_get(struct device *dev)
1849 {
1850 struct list_head *p;
1851 struct scmi_info *info;
1852 struct scmi_handle *handle = NULL;
1853
1854 mutex_lock(&scmi_list_mutex);
1855 list_for_each(p, &scmi_list) {
1856 info = list_entry(p, struct scmi_info, node);
1857 if (dev->parent == info->dev) {
1858 handle = scmi_handle_get_from_info_unlocked(info);
1859 break;
1860 }
1861 }
1862 mutex_unlock(&scmi_list_mutex);
1863
1864 return handle;
1865 }
1866
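/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 is successfully released
 *	if null was passed, it returns -EINVAL;
 */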
1879 int scmi_handle_put(const struct scmi_handle *handle)
1880 {
1881 struct scmi_info *info;
1882
1883 if (!handle)
1884 return -EINVAL;
1885
1886 info = handle_to_scmi_info(handle);
1887 mutex_lock(&scmi_list_mutex);
1888 if (!WARN_ON(!info->users))
1889 info->users--;
1890 mutex_unlock(&scmi_list_mutex);
1891
1892 return 0;
1893 }
1894
1895 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
1896 struct scmi_xfers_info *info)
1897 {
1898 int i;
1899 struct scmi_xfer *xfer;
1900 struct device *dev = sinfo->dev;
1901 const struct scmi_desc *desc = sinfo->desc;
1902
1903
1904 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
1905 dev_err(dev,
1906 "Invalid maximum messages %d, not in range [1 - %lu]\n",
1907 info->max_msg, MSG_TOKEN_MAX);
1908 return -EINVAL;
1909 }
1910
1911 hash_init(info->pending_xfers);
1912
1913
1914 info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
1915 sizeof(long), GFP_KERNEL);
1916 if (!info->xfer_alloc_table)
1917 return -ENOMEM;
1918
1919
1920
1921
1922
1923
1924 INIT_HLIST_HEAD(&info->free_xfers);
1925 for (i = 0; i < info->max_msg; i++) {
1926 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
1927 if (!xfer)
1928 return -ENOMEM;
1929
1930 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
1931 GFP_KERNEL);
1932 if (!xfer->rx.buf)
1933 return -ENOMEM;
1934
1935 xfer->tx.buf = xfer->rx.buf;
1936 init_completion(&xfer->done);
1937 spin_lock_init(&xfer->lock);
1938
1939
1940 hlist_add_head(&xfer->node, &info->free_xfers);
1941 }
1942
1943 spin_lock_init(&info->xfer_lock);
1944
1945 return 0;
1946 }
1947
1948 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
1949 {
1950 const struct scmi_desc *desc = sinfo->desc;
1951
1952 if (!desc->ops->get_max_msg) {
1953 sinfo->tx_minfo.max_msg = desc->max_msg;
1954 sinfo->rx_minfo.max_msg = desc->max_msg;
1955 } else {
1956 struct scmi_chan_info *base_cinfo;
1957
1958 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
1959 if (!base_cinfo)
1960 return -EINVAL;
1961 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
1962
1963
1964 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
1965 if (base_cinfo)
1966 sinfo->rx_minfo.max_msg =
1967 desc->ops->get_max_msg(base_cinfo);
1968 }
1969
1970 return 0;
1971 }
1972
1973 static int scmi_xfer_info_init(struct scmi_info *sinfo)
1974 {
1975 int ret;
1976
1977 ret = scmi_channels_max_msg_configure(sinfo);
1978 if (ret)
1979 return ret;
1980
1981 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
1982 if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
1983 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
1984
1985 return ret;
1986 }
1987
1988 static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
1989 int prot_id, bool tx)
1990 {
1991 int ret, idx;
1992 struct scmi_chan_info *cinfo;
1993 struct idr *idr;
1994
1995
1996 idx = tx ? 0 : 1;
1997 idr = tx ? &info->tx_idr : &info->rx_idr;
1998
1999
2000 cinfo = idr_find(idr, prot_id);
2001 if (cinfo)
2002 return 0;
2003
2004 if (!info->desc->ops->chan_available(dev, idx)) {
2005 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2006 if (unlikely(!cinfo))
2007 return -EINVAL;
2008 goto idr_alloc;
2009 }
2010
2011 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2012 if (!cinfo)
2013 return -ENOMEM;
2014
2015 cinfo->dev = dev;
2016
2017 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2018 if (ret)
2019 return ret;
2020
2021 if (tx && is_polling_required(cinfo, info)) {
2022 if (is_transport_polling_capable(info))
2023 dev_info(dev,
2024 "Enabled polling mode TX channel - prot_id:%d\n",
2025 prot_id);
2026 else
2027 dev_warn(dev,
2028 "Polling mode NOT supported by transport.\n");
2029 }
2030
2031 idr_alloc:
2032 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2033 if (ret != prot_id) {
2034 dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
2035 return ret;
2036 }
2037
2038 cinfo->handle = &info->handle;
2039 return 0;
2040 }
2041
2042 static inline int
2043 scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
2044 {
2045 int ret = scmi_chan_setup(info, dev, prot_id, true);
2046
2047 if (!ret)
2048 scmi_chan_setup(info, dev, prot_id, false);
2049
2050 return ret;
2051 }
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071 static inline struct scmi_device *
2072 scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
2073 int prot_id, const char *name)
2074 {
2075 struct scmi_device *sdev;
2076
2077
2078 sdev = scmi_child_dev_find(info->dev, prot_id, name);
2079 if (sdev)
2080 return sdev;
2081
2082 mutex_lock(&scmi_syspower_mtx);
2083 if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
2084 dev_warn(info->dev,
2085 "SCMI SystemPower protocol device must be unique !\n");
2086 mutex_unlock(&scmi_syspower_mtx);
2087
2088 return NULL;
2089 }
2090
2091 pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
2092
2093 sdev = scmi_device_create(np, info->dev, prot_id, name);
2094 if (!sdev) {
2095 dev_err(info->dev, "failed to create %d protocol device\n",
2096 prot_id);
2097 mutex_unlock(&scmi_syspower_mtx);
2098
2099 return NULL;
2100 }
2101
2102 if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
2103 dev_err(&sdev->dev, "failed to setup transport\n");
2104 scmi_device_destroy(sdev);
2105 mutex_unlock(&scmi_syspower_mtx);
2106
2107 return NULL;
2108 }
2109
2110 if (prot_id == SCMI_PROTOCOL_SYSTEM)
2111 scmi_syspower_registered = true;
2112
2113 mutex_unlock(&scmi_syspower_mtx);
2114
2115 return sdev;
2116 }
2117
2118 static inline void
2119 scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
2120 int prot_id, const char *name)
2121 {
2122 struct scmi_device *sdev;
2123
2124 sdev = scmi_get_protocol_device(np, info, prot_id, name);
2125 if (!sdev)
2126 return;
2127
2128
2129 scmi_set_handle(sdev);
2130 }
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143 static void scmi_create_protocol_devices(struct device_node *np,
2144 struct scmi_info *info, int prot_id)
2145 {
2146 struct list_head *phead;
2147
2148 mutex_lock(&scmi_requested_devices_mtx);
2149 phead = idr_find(&scmi_requested_devices, prot_id);
2150 if (phead) {
2151 struct scmi_requested_dev *rdev;
2152
2153 list_for_each_entry(rdev, phead, node)
2154 scmi_create_protocol_device(np, info, prot_id,
2155 rdev->id_table->name);
2156 }
2157 mutex_unlock(&scmi_requested_devices_mtx);
2158 }
2159
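/**
 * scmi_protocol_device_request  - Helper to request a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be created.
 *
 * This helper lets an SCMI driver request specific devices identified by the
 * @id_table to be created for each active SCMI instance.
 *
 * The requested device name MUST NOT be already existent for any protocol;
 * at first the freshly requested @id_table is annotated in the IDR table
 * @scmi_requested_devices, then the requested device is created for each
 * already active SCMI instance whose DT describes the related protocol.
 *
 * Return: 0 on Success
 */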
2183 int scmi_protocol_device_request(const struct scmi_device_id *id_table)
2184 {
2185 int ret = 0;
2186 unsigned int id = 0;
2187 struct list_head *head, *phead = NULL;
2188 struct scmi_requested_dev *rdev;
2189 struct scmi_info *info;
2190
2191 pr_debug("Requesting SCMI device (%s) for protocol %x\n",
2192 id_table->name, id_table->protocol_id);
2193
2194
2195
2196
2197
2198 mutex_lock(&scmi_requested_devices_mtx);
2199 idr_for_each_entry(&scmi_requested_devices, head, id) {
2200 if (!phead) {
2201
2202 rdev = list_first_entry(head, struct scmi_requested_dev,
2203 node);
2204 if (rdev->id_table->protocol_id ==
2205 id_table->protocol_id)
2206 phead = head;
2207 }
2208 list_for_each_entry(rdev, head, node) {
2209 if (!strcmp(rdev->id_table->name, id_table->name)) {
2210 pr_err("Ignoring duplicate request [%d] %s\n",
2211 rdev->id_table->protocol_id,
2212 rdev->id_table->name);
2213 ret = -EINVAL;
2214 goto out;
2215 }
2216 }
2217 }
2218
2219
2220
2221
2222
2223 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2224 if (!rdev) {
2225 ret = -ENOMEM;
2226 goto out;
2227 }
2228 rdev->id_table = id_table;
2229
2230
2231
2232
2233
2234
2235 if (!phead) {
2236 phead = kzalloc(sizeof(*phead), GFP_KERNEL);
2237 if (!phead) {
2238 kfree(rdev);
2239 ret = -ENOMEM;
2240 goto out;
2241 }
2242 INIT_LIST_HEAD(phead);
2243
2244 ret = idr_alloc(&scmi_requested_devices, (void *)phead,
2245 id_table->protocol_id,
2246 id_table->protocol_id + 1, GFP_KERNEL);
2247 if (ret != id_table->protocol_id) {
2248 pr_err("Failed to save SCMI device - ret:%d\n", ret);
2249 kfree(rdev);
2250 kfree(phead);
2251 ret = -EINVAL;
2252 goto out;
2253 }
2254 ret = 0;
2255 }
2256 list_add(&rdev->node, phead);
2257
2258
2259
2260
2261
2262
2263
2264 mutex_lock(&scmi_list_mutex);
2265 list_for_each_entry(info, &scmi_list, node) {
2266 struct device_node *child;
2267
2268 child = idr_find(&info->active_protocols,
2269 id_table->protocol_id);
2270 if (child) {
2271 struct scmi_device *sdev;
2272
2273 sdev = scmi_get_protocol_device(child, info,
2274 id_table->protocol_id,
2275 id_table->name);
2276
2277 if (sdev && !sdev->handle)
2278 sdev->handle =
2279 scmi_handle_get_from_info_unlocked(info);
2280 } else {
2281 dev_err(info->dev,
2282 "Failed. SCMI protocol %d not active.\n",
2283 id_table->protocol_id);
2284 }
2285 }
2286 mutex_unlock(&scmi_list_mutex);
2287
2288 out:
2289 mutex_unlock(&scmi_requested_devices_mtx);
2290
2291 return ret;
2292 }
2293
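/**
 * scmi_protocol_device_unrequest  - Helper to unrequest a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
 *
 * The unrequested device, described by the provided id_table, is at first
 * removed from the per-protocol list kept in @scmi_requested_devices and the
 * list itself is freed once it becomes empty.
 */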
2308 void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
2309 {
2310 struct list_head *phead;
2311
2312 pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
2313 id_table->name, id_table->protocol_id);
2314
2315 mutex_lock(&scmi_requested_devices_mtx);
2316 phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
2317 if (phead) {
2318 struct scmi_requested_dev *victim, *tmp;
2319
2320 list_for_each_entry_safe(victim, tmp, phead, node) {
2321 if (!strcmp(victim->id_table->name, id_table->name)) {
2322 list_del(&victim->node);
2323 kfree(victim);
2324 break;
2325 }
2326 }
2327
2328 if (list_empty(phead)) {
2329 idr_remove(&scmi_requested_devices,
2330 id_table->protocol_id);
2331 kfree(phead);
2332 }
2333 }
2334 mutex_unlock(&scmi_requested_devices_mtx);
2335 }
2336
2337 static int scmi_cleanup_txrx_channels(struct scmi_info *info)
2338 {
2339 int ret;
2340 struct idr *idr = &info->tx_idr;
2341
2342 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
2343 idr_destroy(&info->tx_idr);
2344
2345 idr = &info->rx_idr;
2346 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
2347 idr_destroy(&info->rx_idr);
2348
2349 return ret;
2350 }
2351
2352 static int scmi_probe(struct platform_device *pdev)
2353 {
2354 int ret;
2355 struct scmi_handle *handle;
2356 const struct scmi_desc *desc;
2357 struct scmi_info *info;
2358 struct device *dev = &pdev->dev;
2359 struct device_node *child, *np = dev->of_node;
2360
2361 desc = of_device_get_match_data(dev);
2362 if (!desc)
2363 return -EINVAL;
2364
2365 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
2366 if (!info)
2367 return -ENOMEM;
2368
2369 info->dev = dev;
2370 info->desc = desc;
2371 INIT_LIST_HEAD(&info->node);
2372 idr_init(&info->protocols);
2373 mutex_init(&info->protocols_mtx);
2374 idr_init(&info->active_protocols);
2375
2376 platform_set_drvdata(pdev, info);
2377 idr_init(&info->tx_idr);
2378 idr_init(&info->rx_idr);
2379
2380 handle = &info->handle;
2381 handle->dev = info->dev;
2382 handle->version = &info->version;
2383 handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
2384 handle->devm_protocol_get = scmi_devm_protocol_get;
2385 handle->devm_protocol_put = scmi_devm_protocol_put;
2386
2387
2388 if (!of_property_read_u32(np, "atomic-threshold-us",
2389 &info->atomic_threshold))
2390 dev_info(dev,
2391 "SCMI System wide atomic threshold set to %d us\n",
2392 info->atomic_threshold);
2393 handle->is_transport_atomic = scmi_is_transport_atomic;
2394
2395 if (desc->ops->link_supplier) {
2396 ret = desc->ops->link_supplier(dev);
2397 if (ret)
2398 return ret;
2399 }
2400
2401 ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
2402 if (ret)
2403 return ret;
2404
2405 ret = scmi_xfer_info_init(info);
2406 if (ret)
2407 goto clear_txrx_setup;
2408
2409 if (scmi_notification_init(handle))
2410 dev_err(dev, "SCMI Notifications NOT available.\n");
2411
2412 if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
2413 dev_err(dev,
2414 "Transport is not polling capable. Atomic mode not supported.\n");
2415
2416
2417
2418
2419
2420
2421 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
2422 if (ret) {
2423 dev_err(dev, "unable to communicate with SCMI\n");
2424 goto notification_exit;
2425 }
2426
2427 mutex_lock(&scmi_list_mutex);
2428 list_add_tail(&info->node, &scmi_list);
2429 mutex_unlock(&scmi_list_mutex);
2430
2431 for_each_available_child_of_node(np, child) {
2432 u32 prot_id;
2433
2434 if (of_property_read_u32(child, "reg", &prot_id))
2435 continue;
2436
2437 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2438 dev_err(dev, "Out of range protocol %d\n", prot_id);
2439
2440 if (!scmi_is_protocol_implemented(handle, prot_id)) {
2441 dev_err(dev, "SCMI protocol %d not implemented\n",
2442 prot_id);
2443 continue;
2444 }
2445
2446
2447
2448
2449
2450 ret = idr_alloc(&info->active_protocols, child,
2451 prot_id, prot_id + 1, GFP_KERNEL);
2452 if (ret != prot_id) {
2453 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
2454 prot_id);
2455 continue;
2456 }
2457
2458 of_node_get(child);
2459 scmi_create_protocol_devices(child, info, prot_id);
2460 }
2461
2462 return 0;
2463
2464 notification_exit:
2465 scmi_notification_exit(&info->handle);
2466 clear_txrx_setup:
2467 scmi_cleanup_txrx_channels(info);
2468 return ret;
2469 }
2470
2471 void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
2472 {
2473 idr_remove(idr, id);
2474 }
2475
2476 static int scmi_remove(struct platform_device *pdev)
2477 {
2478 int ret = 0, id;
2479 struct scmi_info *info = platform_get_drvdata(pdev);
2480 struct device_node *child;
2481
2482 mutex_lock(&scmi_list_mutex);
2483 if (info->users)
2484 ret = -EBUSY;
2485 else
2486 list_del(&info->node);
2487 mutex_unlock(&scmi_list_mutex);
2488
2489 if (ret)
2490 return ret;
2491
2492 scmi_notification_exit(&info->handle);
2493
2494 mutex_lock(&info->protocols_mtx);
2495 idr_destroy(&info->protocols);
2496 mutex_unlock(&info->protocols_mtx);
2497
2498 idr_for_each_entry(&info->active_protocols, child, id)
2499 of_node_put(child);
2500 idr_destroy(&info->active_protocols);
2501
2502
2503 return scmi_cleanup_txrx_channels(info);
2504 }
2505
2506 static ssize_t protocol_version_show(struct device *dev,
2507 struct device_attribute *attr, char *buf)
2508 {
2509 struct scmi_info *info = dev_get_drvdata(dev);
2510
2511 return sprintf(buf, "%u.%u\n", info->version.major_ver,
2512 info->version.minor_ver);
2513 }
2514 static DEVICE_ATTR_RO(protocol_version);
2515
2516 static ssize_t firmware_version_show(struct device *dev,
2517 struct device_attribute *attr, char *buf)
2518 {
2519 struct scmi_info *info = dev_get_drvdata(dev);
2520
2521 return sprintf(buf, "0x%x\n", info->version.impl_ver);
2522 }
2523 static DEVICE_ATTR_RO(firmware_version);
2524
2525 static ssize_t vendor_id_show(struct device *dev,
2526 struct device_attribute *attr, char *buf)
2527 {
2528 struct scmi_info *info = dev_get_drvdata(dev);
2529
2530 return sprintf(buf, "%s\n", info->version.vendor_id);
2531 }
2532 static DEVICE_ATTR_RO(vendor_id);
2533
2534 static ssize_t sub_vendor_id_show(struct device *dev,
2535 struct device_attribute *attr, char *buf)
2536 {
2537 struct scmi_info *info = dev_get_drvdata(dev);
2538
2539 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2540 }
2541 static DEVICE_ATTR_RO(sub_vendor_id);
2542
2543 static struct attribute *versions_attrs[] = {
2544 &dev_attr_firmware_version.attr,
2545 &dev_attr_protocol_version.attr,
2546 &dev_attr_vendor_id.attr,
2547 &dev_attr_sub_vendor_id.attr,
2548 NULL,
2549 };
2550 ATTRIBUTE_GROUPS(versions);
2551
2552
2553 static const struct of_device_id scmi_of_match[] = {
2554 #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
2555 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
2556 #endif
2557 #ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
2558 { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2559 #endif
2560 #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
2561 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2562 #endif
2563 #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
2564 { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2565 #endif
2566 { },
2567 };
2568
2569 MODULE_DEVICE_TABLE(of, scmi_of_match);
2570
2571 static struct platform_driver scmi_driver = {
2572 .driver = {
2573 .name = "arm-scmi",
2574 .of_match_table = scmi_of_match,
2575 .dev_groups = versions_groups,
2576 },
2577 .probe = scmi_probe,
2578 .remove = scmi_remove,
2579 };
2580
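/**
 * __scmi_transports_setup  - Common helper to call transport specific
 * init/exit code if provided.
 *
 * @init: A flag to distinguish between init and exit.
 *
 * Note that, if provided, the init/exit hooks are invoked for all the
 * transports currently compiled in.
 *
 * Return: 0 on Success.
 */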
2592 static inline int __scmi_transports_setup(bool init)
2593 {
2594 int ret = 0;
2595 const struct of_device_id *trans;
2596
2597 for (trans = scmi_of_match; trans->data; trans++) {
2598 const struct scmi_desc *tdesc = trans->data;
2599
2600 if ((init && !tdesc->transport_init) ||
2601 (!init && !tdesc->transport_exit))
2602 continue;
2603
2604 if (init)
2605 ret = tdesc->transport_init();
2606 else
2607 tdesc->transport_exit();
2608
2609 if (ret) {
2610 pr_err("SCMI transport %s FAILED initialization!\n",
2611 trans->compatible);
2612 break;
2613 }
2614 }
2615
2616 return ret;
2617 }
2618
2619 static int __init scmi_transports_init(void)
2620 {
2621 return __scmi_transports_setup(true);
2622 }
2623
2624 static void __exit scmi_transports_exit(void)
2625 {
2626 __scmi_transports_setup(false);
2627 }
2628
2629 static int __init scmi_driver_init(void)
2630 {
2631 int ret;
2632
2633
2634 if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
2635 return -EINVAL;
2636
2637 scmi_bus_init();
2638
2639
2640 ret = scmi_transports_init();
2641 if (ret)
2642 return ret;
2643
2644 scmi_base_register();
2645
2646 scmi_clock_register();
2647 scmi_perf_register();
2648 scmi_power_register();
2649 scmi_reset_register();
2650 scmi_sensors_register();
2651 scmi_voltage_register();
2652 scmi_system_register();
2653 scmi_powercap_register();
2654
2655 return platform_driver_register(&scmi_driver);
2656 }
2657 subsys_initcall(scmi_driver_init);
2658
2659 static void __exit scmi_driver_exit(void)
2660 {
2661 scmi_base_unregister();
2662
2663 scmi_clock_unregister();
2664 scmi_perf_unregister();
2665 scmi_power_unregister();
2666 scmi_reset_unregister();
2667 scmi_sensors_unregister();
2668 scmi_voltage_unregister();
2669 scmi_system_unregister();
2670 scmi_powercap_unregister();
2671
2672 scmi_bus_exit();
2673
2674 scmi_transports_exit();
2675
2676 platform_driver_unregister(&scmi_driver);
2677 }
2678 module_exit(scmi_driver_exit);
2679
2680 MODULE_ALIAS("platform:arm-scmi");
2681 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
2682 MODULE_DESCRIPTION("ARM SCMI protocol driver");
2683 MODULE_LICENSE("GPL v2");