/*
 * SSH request transport layer (rtl): sequences SSH requests on top of the
 * packet transport layer, matches responses to pending requests by request
 * ID and enforces request timeouts.
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/error-injection.h>
#include <linux/ktime.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <linux/surface_aggregator/serial_hub.h>
#include <linux/surface_aggregator/controller.h>

#include "ssh_packet_layer.h"
#include "ssh_request_layer.h"

#include "trace.h"

/*
 * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
 *
 * Timeout as ktime_t delta for request responses. If we have not received a
 * response in this time frame after finishing the respective packet
 * transmission of the request, the request will be completed with -ETIMEDOUT
 * as status code.
 */
#define SSH_RTL_REQUEST_TIMEOUT			ms_to_ktime(3000)

/*
 * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
 *
 * Time resolution for timeouts. Should be larger than one jiffy to avoid
 * direct re-scheduling of the reaper work_struct.
 */
#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION	ms_to_ktime(max(2000 / HZ, 50))
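
/*
 * Example (illustrative): with a common HZ of 250, 2000 / HZ evaluates to
 * 8 ms, so the resolution above works out to max(8, 50) = 50 ms; only
 * configurations with HZ below 40 would push it above 50 ms.
 */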

/*
 * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
 *
 * Maximum number of requests concurrently waiting to be completed (i.e.
 * waiting for the corresponding packet transmission to finish if they don't
 * have a response or waiting for a response if they have one).
 */
#define SSH_RTL_MAX_PENDING		3

/*
 * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
 * Used to prevent livelocking of the workqueue.
 */
#define SSH_RTL_TX_BATCH		10

#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION

/*
 * ssh_rtl_should_drop_response() - Error injection hook to drop request
 * responses.
 *
 * Useful to cause request transmission timeouts in the driver by dropping
 * the response to a request.
 */
static noinline bool ssh_rtl_should_drop_response(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);

#else

static inline bool ssh_rtl_should_drop_response(void)
{
	return false;
}

#endif

static u16 ssh_request_get_rqid(struct ssh_request *rqst)
{
	return get_unaligned_le16(rqst->packet.data.ptr +
				  SSH_MSGOFFSET_COMMAND(rqid));
}

static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
{
	if (!rqst->packet.data.ptr)
		return U32_MAX;

	return ssh_request_get_rqid(rqst);
}

static void ssh_rtl_queue_remove(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->queue.lock);

	if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
		spin_unlock(&rtl->queue.lock);
		return;
	}

	list_del(&rqst->node);

	spin_unlock(&rtl->queue.lock);
	ssh_request_put(rqst);
}

static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
{
	bool empty;

	spin_lock(&rtl->queue.lock);
	empty = list_empty(&rtl->queue.head);
	spin_unlock(&rtl->queue.lock);

	return empty;
}

static void ssh_rtl_pending_remove(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->pending.lock);

	if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return;
	}

	atomic_dec(&rtl->pending.count);
	list_del(&rqst->node);

	spin_unlock(&rtl->pending.lock);

	ssh_request_put(rqst);
}

static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->pending.lock);

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return -EINVAL;
	}

	if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return -EALREADY;
	}

	atomic_inc(&rtl->pending.count);
	list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);

	spin_unlock(&rtl->pending.lock);
	return 0;
}

static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	trace_ssam_request_complete(rqst, status);

	/* rtl/ptl may not be set if we're canceling before submitting. */
	rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
		     ssh_request_get_rqid_safe(rqst), status);

	rqst->ops->complete(rqst, NULL, NULL, status);
}

static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
				      const struct ssh_command *cmd,
				      const struct ssam_span *data)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	trace_ssam_request_complete(rqst, 0);

	rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
		ssh_request_get_rqid(rqst));

	rqst->ops->complete(rqst, cmd, data, 0);
}

static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
		return !atomic_read(&rtl->pending.count);

	return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
}
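
/*
 * Illustrative note: with SSH_RTL_MAX_PENDING == 3, a fourth submitted
 * request remains on the queue until one of the three pending requests
 * completes, while a flush request is held back until the pending set is
 * completely empty.
 */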

static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
{
	struct ssh_request *rqst = ERR_PTR(-ENOENT);
	struct ssh_request *p, *n;

	spin_lock(&rtl->queue.lock);

	/* Find first non-locked request and remove it. */
	list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
		if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
			continue;

		if (!ssh_rtl_tx_can_process(p)) {
			rqst = ERR_PTR(-EBUSY);
			break;
		}

		/* Remove from queue and mark as transmitting. */
		set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);

		list_del(&p->node);

		rqst = p;
		break;
	}

	spin_unlock(&rtl->queue.lock);
	return rqst;
}

static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
{
	struct ssh_request *rqst;
	int status;

	/* Get and prepare next request for transmit. */
	rqst = ssh_rtl_tx_next(rtl);
	if (IS_ERR(rqst))
		return PTR_ERR(rqst);

	/* Add it to/mark it as pending. */
	status = ssh_rtl_tx_pending_push(rqst);
	if (status) {
		ssh_request_put(rqst);
		return -EAGAIN;
	}

	/* Submit packet. */
	status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
	if (status == -ESHUTDOWN) {
		/*
		 * Packet has been refused because the packet layer is shutting
		 * down. Lock the request to prevent any further transmission
		 * attempts, remove it from the pending set and complete it
		 * with -ESHUTDOWN here.
		 */
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);

		ssh_rtl_pending_remove(rqst);
		ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);

		ssh_request_put(rqst);
		return -ESHUTDOWN;

	} else if (status) {
		/*
		 * If submission failed and the packet layer is not shutting
		 * down, the only expected error is -EINVAL: the packet has
		 * been locked in the meantime, which can only happen due to
		 * cancellation. Cancellation itself takes care of removing
		 * the request from the pending set, so only drop our
		 * reference here and try the next request.
		 */
		WARN_ON(status != -EINVAL);

		ssh_request_put(rqst);
		return -EAGAIN;
	}

	ssh_request_put(rqst);
	return 0;
}

static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
{
	if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
		return false;

	if (ssh_rtl_queue_empty(rtl))
		return false;

	return schedule_work(&rtl->tx.work);
}

static void ssh_rtl_tx_work_fn(struct work_struct *work)
{
	struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
	unsigned int iterations = SSH_RTL_TX_BATCH;
	int status;

	/*
	 * Try to be nice and not block/live-lock the workqueue: Run a maximum
	 * of SSH_RTL_TX_BATCH tries, then re-schedule if necessary.
	 */
	do {
		status = ssh_rtl_tx_try_process_one(rtl);
		if (status == -ENOENT || status == -EBUSY)
			return;

		if (status == -ESHUTDOWN) {
			/*
			 * Packet system shutting down. No new packets can be
			 * transmitted. Return silently, the party initiating
			 * the shutdown should handle the rest.
			 */
			return;
		}

		WARN_ON(status != 0 && status != -EAGAIN);
	} while (--iterations);

	/* Out of tries, reschedule. */
	ssh_rtl_tx_schedule(rtl);
}

/**
 * ssh_rtl_submit() - Submit a request to the transport layer.
 * @rtl:  The request transport layer.
 * @rqst: The request to submit.
 *
 * Submits a request to the transport layer. A single request may not be
 * submitted multiple times without reinitializing it.
 *
 * Return: Returns zero on success, %-EINVAL if the request type is invalid or
 * the request has been canceled prior to submission, %-EALREADY if the
 * request has already been submitted, or %-ESHUTDOWN in case the request
 * transport layer has been shut down.
 */
int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
{
	trace_ssam_request_submit(rqst);

	/*
	 * Ensure that requests expecting a response are sequenced. If this
	 * invariant ever changes, see the comment in ssh_rtl_complete() on
	 * what is required to be changed in the code.
	 */
	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
		if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
			return -EINVAL;

	spin_lock(&rtl->queue.lock);

	/*
	 * Try to set the ptl reference and thereby check if this request has
	 * already been submitted.
	 *
	 * This must happen inside the lock to avoid a lost-update problem
	 * with non-pending cancellation: If it were done outside,
	 * ssh_rtl_cancel_nonpending() could observe the ptl reference after
	 * it has been set here but before the request has actually been added
	 * to the queue, and would then fail to remove it. With the cmpxchg()
	 * inside the critical section, queue removal in the cancellation path
	 * waits (via the lock) until the request has been added.
	 */
	if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
		spin_unlock(&rtl->queue.lock);
		return -EALREADY;
	}

	/*
	 * Ensure that the ptl reference is set before we continue modifying
	 * the state. This is required for non-pending cancellation and is
	 * paired with the barrier in ssh_rtl_cancel_nonpending(): By setting
	 * ptl before testing "locked", either we see the locked flag set by a
	 * concurrent cancellation and abort below, or the cancellation sees
	 * the ptl reference and grabs the request from the queue.
	 */
	smp_mb__after_atomic();

	if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
		spin_unlock(&rtl->queue.lock);
		return -ESHUTDOWN;
	}

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
		spin_unlock(&rtl->queue.lock);
		return -EINVAL;
	}

	set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
	list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);

	spin_unlock(&rtl->queue.lock);

	ssh_rtl_tx_schedule(rtl);
	return 0;
}
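
/*
 * Usage sketch (illustrative, field and variable names hypothetical): callers
 * typically initialize an embedded struct ssh_request (here as member "base",
 * as the flush request further below does), point the underlying packet data
 * at the serialized message, submit it and then drop their own reference:
 *
 *	status = ssh_rtl_submit(rtl, &rqst->base);
 *	if (status)
 *		return status;
 *	ssh_request_put(&rqst->base);
 *
 * Completion (response, error or cancellation) is then reported
 * asynchronously through the ssh_request_ops callbacks.
 */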

static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
				       ktime_t expires)
{
	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
	ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);

	spin_lock(&rtl->rtx_timeout.lock);

	/* Re-adjust/schedule the reaper only if it is above the resolution delta. */
	if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
		rtl->rtx_timeout.expires = expires;
		mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
	}

	spin_unlock(&rtl->rtx_timeout.lock);
}

static void ssh_rtl_timeout_start(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
	ktime_t timestamp = ktime_get_coarse_boottime();
	ktime_t timeout = rtl->rtx_timeout.timeout;

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
		return;

	/*
	 * Note: The timestamp gets set only once, on the packet completion
	 * callback. All other access to it is read-only.
	 */
	WRITE_ONCE(rqst->timestamp, timestamp);

	/* Ensure that the timestamp is set before (re-)scheduling the reaper. */
	smp_mb__after_atomic();

	ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
}

static void ssh_rtl_complete(struct ssh_rtl *rtl,
			     const struct ssh_command *command,
			     const struct ssam_span *command_data)
{
	struct ssh_request *r = NULL;
	struct ssh_request *p, *n;
	u16 rqid = get_unaligned_le16(&command->rqid);

	trace_ssam_rx_response_received(command, command_data->len);

	/*
	 * Get the request from the pending set based on the request ID and
	 * mark it as response received and locked.
	 */
	spin_lock(&rtl->pending.lock);
	list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
		/* We generally expect requests to be processed in order. */
		if (unlikely(ssh_request_get_rqid(p) != rqid))
			continue;

		/* Simulate response timeout via error injection. */
		if (ssh_rtl_should_drop_response()) {
			spin_unlock(&rtl->pending.lock);

			trace_ssam_ei_rx_drop_response(p);
			rtl_info(rtl, "request error injection: dropping response for request %p\n",
				 &p->packet);
			return;
		}

		/*
		 * Mark the request as "response received" and "locked" as we
		 * are going to complete it.
		 */
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
		set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);

		atomic_dec(&rtl->pending.count);
		list_del(&p->node);

		r = p;
		break;
	}
	spin_unlock(&rtl->pending.lock);

	if (!r) {
		rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
			 rqid);
		return;
	}

	/* If the request hasn't been completed yet, we will do this now. */
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
		ssh_request_put(r);
		ssh_rtl_tx_schedule(rtl);
		return;
	}

	/*
	 * Make sure the request has been transmitted. For a sequenced
	 * request, the completion callback of the underlying packet runs on
	 * the receiver thread once the ACK for the packet has been received,
	 * and this function runs on the receiver thread as well. Thus, if the
	 * packet has been successfully transmitted and ACKed, the transmitted
	 * flag is guaranteed to be set and visible here.
	 *
	 * Unsequenced packets are not handled here, as they must never expect
	 * a response (ensured in ssh_rtl_submit()). If this ever changes, the
	 * ACKED state of the underlying packet would have to be checked here
	 * instead.
	 */
	if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
		rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
			rqid);

		/*
		 * NOTE: The request is guaranteed to be pending at this point
		 * and thus should no longer be on the queue; the removal here
		 * is purely defensive.
		 */
		ssh_rtl_queue_remove(r);

		ssh_rtl_complete_with_status(r, -EREMOTEIO);
		ssh_request_put(r);

		ssh_rtl_tx_schedule(rtl);
		return;
	}

	/*
	 * The request has been fully transmitted and received a response
	 * (i.e. it is no longer pending). Complete it with the received
	 * command and data.
	 */
	ssh_rtl_complete_with_rsp(r, command, command_data);
	ssh_request_put(r);

	ssh_rtl_tx_schedule(rtl);
}

static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
{
	struct ssh_rtl *rtl;
	unsigned long flags, fixed;
	bool remove;

	/*
	 * Handle unsubmitted requests: Try to mark the request as locked,
	 * expecting the state to contain only the static (type) flags, i.e.
	 * expecting it to be unsubmitted. Note that, even if this succeeds,
	 * a concurrently executing submit call might still be adding the
	 * request to the queue. In that case, however, the ptl reference must
	 * have been set previously, as "locked" is checked after setting ptl
	 * in ssh_rtl_submit(). Thus, only if we successfully lock the request
	 * here and ptl is NULL is the request guaranteed to never be added to
	 * the queue (due to the "locked" check in ssh_rtl_tx_next()).
	 * Otherwise, we need to try and grab it from the queue below.
	 *
	 * Note that if the cmpxchg() fails, ptl is guaranteed to have been
	 * set, as the state can only become nonzero after that. Also note
	 * that the static (type) flags must be fetched beforehand so that
	 * they don't cause the cmpxchg() to fail.
	 */
	fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
	flags = cmpxchg(&r->state, fixed, fixed | BIT(SSH_REQUEST_SF_LOCKED_BIT));

	/*
	 * Pairs with the barrier in ssh_rtl_submit(): Either we observe the
	 * ptl reference set by a concurrent submission below, or the
	 * submission observes the locked flag set above and bails out.
	 */
	smp_mb__after_atomic();

	if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return true;

		ssh_rtl_complete_with_status(r, -ECANCELED);
		return true;
	}

	rtl = ssh_request_rtl(r);
	spin_lock(&rtl->queue.lock);

	/*
	 * If the request is still queued, it cannot be transmitting or
	 * pending yet (requests cannot be re-submitted). Removing it from
	 * the queue here therefore detaches it completely.
	 */
	remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
	if (!remove) {
		spin_unlock(&rtl->queue.lock);
		return false;
	}

	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
	list_del(&r->node);

	spin_unlock(&rtl->queue.lock);

	ssh_request_put(r);	/* Drop reference obtained from the queue. */

	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return true;

	ssh_rtl_complete_with_status(r, -ECANCELED);
	return true;
}

static bool ssh_rtl_cancel_pending(struct ssh_request *r)
{
	/* If the request is already locked, it is going to be removed shortly. */
	if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
		return true;

	/*
	 * Now that the request has been locked, it cannot be added to the
	 * system any more. If ptl is NULL, the locked request cannot have
	 * been submitted yet and we can complete it directly.
	 */
	if (!READ_ONCE(r->packet.ptl)) {
		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return true;

		ssh_rtl_complete_with_status(r, -ECANCELED);
		return true;
	}

	/*
	 * Try to cancel the underlying packet. If the packet has not been
	 * completed yet, this will subsequently (and synchronously) call its
	 * completion callback, which in turn completes the request.
	 */
	ssh_ptl_cancel(&r->packet);

	/*
	 * If the packet has been completed with success, i.e. has not been
	 * canceled by the call above, the request may not have been completed
	 * yet (it may still be waiting for a response). Check if we need to
	 * complete it here.
	 */
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return true;

	ssh_rtl_queue_remove(r);
	ssh_rtl_pending_remove(r);
	ssh_rtl_complete_with_status(r, -ECANCELED);

	return true;
}

/**
 * ssh_rtl_cancel() - Cancel request.
 * @rqst:    The request to cancel.
 * @pending: Whether to also cancel pending requests.
 *
 * Cancels the given request. If @pending is %false, this will not cancel
 * pending requests, i.e. requests that have already been submitted to the
 * packet layer but have not been completed yet. If @pending is %true, the
 * request is canceled regardless of the state it is in.
 *
 * If the request has been canceled by calling this function, both completion
 * and release callbacks of the request will be executed in a reasonable
 * time frame. This may happen during execution of this function, but there
 * is no guarantee for that: A request currently being transmitted, for
 * example, will only be canceled/completed once transmission has finished,
 * with the respective callbacks running on the transmitter thread.
 *
 * Return: Returns %true if the given request has been canceled or completed,
 * either by this function or prior to calling this function, %false
 * otherwise. If @pending is %true, this function always returns %true.
 */
bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
{
	struct ssh_rtl *rtl;
	bool canceled;

	if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
		return true;

	trace_ssam_request_cancel(rqst);

	if (pending)
		canceled = ssh_rtl_cancel_pending(rqst);
	else
		canceled = ssh_rtl_cancel_nonpending(rqst);

	/* Note: rtl may be NULL if the request has not been submitted yet. */
	rtl = ssh_request_rtl(rqst);
	if (canceled && rtl)
		ssh_rtl_tx_schedule(rtl);

	return canceled;
}

static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
{
	struct ssh_request *r = to_ssh_request(p);

	if (unlikely(status)) {
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);

		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return;

		/*
		 * The packet may have been canceled even though it has not
		 * been submitted yet, so the request may still be queued.
		 * Check the queue and remove it if necessary. As the timeout
		 * is only started on successful transmission, it does not
		 * need to be canceled here.
		 */
		ssh_rtl_queue_remove(r);
		ssh_rtl_pending_remove(r);
		ssh_rtl_complete_with_status(r, status);

		ssh_rtl_tx_schedule(ssh_request_rtl(r));
		return;
	}

	/* Update state: Mark as transmitted and clear transmitting. */
	set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
	/* Ensure state never gets zero. */
	smp_mb__before_atomic();
	clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);

	/* If we expect a response, we only need to start the timeout here. */
	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
		/*
		 * Note: This is the only place where the timestamp gets set,
		 * all other access to it is read-only.
		 */
		ssh_rtl_timeout_start(r);
		return;
	}

	/*
	 * If no response is expected, lock, remove, and complete the request.
	 * At this point, the request is guaranteed to have left the queue and
	 * no timeout has been started, so it only needs to be removed from
	 * the pending set. If the request has already been completed (e.g.
	 * due to cancellation), return without completing it again.
	 */
	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return;

	ssh_rtl_pending_remove(r);
	ssh_rtl_complete_with_status(r, 0);

	ssh_rtl_tx_schedule(ssh_request_rtl(r));
}

static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
{
	ktime_t timestamp = READ_ONCE(r->timestamp);

	if (timestamp != KTIME_MAX)
		return ktime_add(timestamp, timeout);
	else
		return KTIME_MAX;
}

static void ssh_rtl_timeout_reap(struct work_struct *work)
{
	struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
	struct ssh_request *r, *n;
	LIST_HEAD(claimed);
	ktime_t now = ktime_get_coarse_boottime();
	ktime_t timeout = rtl->rtx_timeout.timeout;
	ktime_t next = KTIME_MAX;

	trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));

	/*
	 * Mark the reaper as "not pending". This is done before checking any
	 * requests to avoid lost-update type problems.
	 */
	spin_lock(&rtl->rtx_timeout.lock);
	rtl->rtx_timeout.expires = KTIME_MAX;
	spin_unlock(&rtl->rtx_timeout.lock);

	spin_lock(&rtl->pending.lock);
	list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
		ktime_t expires = ssh_request_get_expiration(r, timeout);

		/*
		 * Check if the timeout has expired yet. If not, keep track of
		 * the next expiration date to be handled after this run.
		 */
		if (ktime_after(expires, now)) {
			next = ktime_before(expires, next) ? expires : next;
			continue;
		}

		/* Avoid further transitions if the request is locked. */
		if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
			continue;

		/*
		 * The request is now marked as locked, so it cannot be added
		 * to the pending or queued lists again after we remove it
		 * here. Its node can therefore be reused for the claimed list.
		 */
		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);

		atomic_dec(&rtl->pending.count);
		list_move_tail(&r->node, &claimed);
	}
	spin_unlock(&rtl->pending.lock);

	/* Complete the claimed requests with -ETIMEDOUT. */
	list_for_each_entry_safe(r, n, &claimed, node) {
		trace_ssam_request_timeout(r);

		/*
		 * We still need test_and_set() here as we may compete with
		 * cancellation.
		 */
		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			ssh_rtl_complete_with_status(r, -ETIMEDOUT);

		/*
		 * Drop the reference obtained by removing the request from
		 * the pending set.
		 */
		list_del(&r->node);
		ssh_request_put(r);
	}

	/* Ensure that the reaper doesn't run again immediately. */
	next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
	if (next != KTIME_MAX)
		ssh_rtl_timeout_reaper_mod(rtl, now, next);

	ssh_rtl_tx_schedule(rtl);
}

static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
			     const struct ssam_span *data)
{
	trace_ssam_rx_event_received(cmd, data->len);

	rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
		get_unaligned_le16(&cmd->rqid));

	rtl->ops.handle_event(rtl, cmd, data);
}

static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
{
	struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
	struct device *dev = &p->serdev->dev;
	struct ssh_command *command;
	struct ssam_span command_data;

	if (sshp_parse_command(dev, data, &command, &command_data))
		return;

	if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
		ssh_rtl_rx_event(rtl, command, &command_data);
	else
		ssh_rtl_complete(rtl, command, &command_data);
}

static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
{
	if (!data->len) {
		ptl_err(p, "rtl: rx: no data frame payload\n");
		return;
	}

	switch (data->ptr[0]) {
	case SSH_PLD_TYPE_CMD:
		ssh_rtl_rx_command(p, data);
		break;

	default:
		ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
			data->ptr[0]);
		break;
	}
}

static void ssh_rtl_packet_release(struct ssh_packet *p)
{
	struct ssh_request *rqst;

	rqst = to_ssh_request(p);
	rqst->ops->release(rqst);
}

static const struct ssh_packet_ops ssh_rtl_packet_ops = {
	.complete = ssh_rtl_packet_callback,
	.release = ssh_rtl_packet_release,
};

/**
 * ssh_request_init() - Initialize SSH request.
 * @rqst:  The request to initialize.
 * @flags: Request flags, determining the type of the request.
 * @ops:   Request operations.
 *
 * Initializes the given SSH request and its underlying packet. The message
 * buffer of the underlying packet is not set up by this function and has to
 * be provided by the caller before the request is submitted.
 *
 * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
 */
int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
		     const struct ssh_request_ops *ops)
{
	unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);

	/* Unsequenced requests cannot have a response. */
	if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
		return -EINVAL;

	if (!(flags & SSAM_REQUEST_UNSEQUENCED))
		type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);

	ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
			&ssh_rtl_packet_ops);

	INIT_LIST_HEAD(&rqst->node);

	rqst->state = 0;
	if (flags & SSAM_REQUEST_HAS_RESPONSE)
		rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);

	rqst->timestamp = KTIME_MAX;
	rqst->ops = ops;

	return 0;
}
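
/*
 * Example flag combinations (illustrative): a command expecting a reply is
 * initialized with SSAM_REQUEST_HAS_RESPONSE and is therefore sequenced,
 * while a fire-and-forget command may pass SSAM_REQUEST_UNSEQUENCED;
 * combining both flags is rejected with -EINVAL above.
 */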

/**
 * ssh_rtl_init() - Initialize request transport layer.
 * @rtl:    The request transport layer to initialize.
 * @serdev: The underlying serial device, i.e. the lower-level transport.
 * @ops:    Request transport layer operations.
 *
 * Initializes the given request transport layer and the underlying packet
 * transport layer. Transmitter and receiver must be started separately via
 * ssh_rtl_start() after initialization.
 *
 * Return: Returns zero on success and a nonzero error code on failure.
 */
int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
		 const struct ssh_rtl_ops *ops)
{
	struct ssh_ptl_ops ptl_ops;
	int status;

	ptl_ops.data_received = ssh_rtl_rx_data;

	status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
	if (status)
		return status;

	spin_lock_init(&rtl->queue.lock);
	INIT_LIST_HEAD(&rtl->queue.head);

	spin_lock_init(&rtl->pending.lock);
	INIT_LIST_HEAD(&rtl->pending.head);
	atomic_set_release(&rtl->pending.count, 0);

	INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);

	spin_lock_init(&rtl->rtx_timeout.lock);
	rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
	rtl->rtx_timeout.expires = KTIME_MAX;
	INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);

	rtl->ops = *ops;

	return 0;
}

/**
 * ssh_rtl_destroy() - Deinitialize request transport layer.
 * @rtl: The request transport layer to deinitialize.
 *
 * Deinitializes the given request transport layer and frees the resources
 * associated with it. If transmitter and receiver have been started, the
 * layer must be shut down via ssh_rtl_shutdown() before this function may be
 * called.
 */
void ssh_rtl_destroy(struct ssh_rtl *rtl)
{
	ssh_ptl_destroy(&rtl->ptl);
}

/**
 * ssh_rtl_start() - Start request transmitter and receiver.
 * @rtl: The request transport layer.
 *
 * Return: Returns zero on success, a negative error code on failure.
 */
int ssh_rtl_start(struct ssh_rtl *rtl)
{
	int status;

	status = ssh_ptl_tx_start(&rtl->ptl);
	if (status)
		return status;

	ssh_rtl_tx_schedule(rtl);

	status = ssh_ptl_rx_start(&rtl->ptl);
	if (status) {
		ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
		ssh_ptl_tx_stop(&rtl->ptl);
		return status;
	}

	return 0;
}
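
/*
 * Lifecycle sketch (illustrative): a user of this layer initializes it via
 * ssh_rtl_init() and brings it up with ssh_rtl_start(); on teardown,
 * ssh_rtl_shutdown() completes all outstanding requests with -ESHUTDOWN
 * before ssh_rtl_destroy() releases the underlying packet transport layer.
 */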

struct ssh_flush_request {
	struct ssh_request base;
	struct completion completion;
	int status;
};

static void ssh_rtl_flush_request_complete(struct ssh_request *r,
					   const struct ssh_command *cmd,
					   const struct ssam_span *data,
					   int status)
{
	struct ssh_flush_request *rqst;

	rqst = container_of(r, struct ssh_flush_request, base);
	rqst->status = status;
}

static void ssh_rtl_flush_request_release(struct ssh_request *r)
{
	struct ssh_flush_request *rqst;

	rqst = container_of(r, struct ssh_flush_request, base);
	complete_all(&rqst->completion);
}

static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
	.complete = ssh_rtl_flush_request_complete,
	.release = ssh_rtl_flush_request_release,
};

/**
 * ssh_rtl_flush() - Flush the request transport layer.
 * @rtl:     The request transport layer.
 * @timeout: Timeout for the flush operation in jiffies.
 *
 * Queues a special flush request and waits for its completion. This request
 * will be completed after all other currently queued and pending requests
 * have been completed. Flushing the request layer therefore guarantees that
 * all previously submitted requests have been fully completed before this
 * call returns.
 *
 * If the caller ensures that no new requests are submitted after a call to
 * this function, the request transport layer is guaranteed to have no
 * remaining requests when this call returns.
 *
 * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and
 * has been canceled as a result of the timeout, %-ESHUTDOWN if the request
 * transport layer has been shut down, or %-EINTR if the underlying packet
 * transmission has been interrupted.
 */
int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
{
	const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
	struct ssh_flush_request rqst;
	int status;

	ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
	rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
	rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
	rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);

	init_completion(&rqst.completion);

	status = ssh_rtl_submit(rtl, &rqst.base);
	if (status)
		return status;

	ssh_request_put(&rqst.base);

	if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
		ssh_rtl_cancel(&rqst.base, true);
		wait_for_completion(&rqst.completion);
	}

	WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
		rqst.status != -ESHUTDOWN && rqst.status != -EINTR);

	return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
}
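
/*
 * Example (illustrative): callers typically flush with a timeout given in
 * jiffies, e.g. ssh_rtl_flush(rtl, msecs_to_jiffies(5000)) as done in the
 * ssh_rtl_start() error path above; a flush that times out is reported as
 * -ETIMEDOUT.
 */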

/**
 * ssh_rtl_shutdown() - Shut down request transport layer.
 * @rtl: The request transport layer.
 *
 * Shuts down the request transport layer, removing and canceling all queued
 * and pending requests. Requests canceled by this operation will be completed
 * with %-ESHUTDOWN as status. The underlying packet layer will be shut down
 * as well.
 *
 * As a result of this function, the transport layer will be marked as shut
 * down. Submitting requests after the transport layer has been shut down will
 * fail with %-ESHUTDOWN.
 */
void ssh_rtl_shutdown(struct ssh_rtl *rtl)
{
	struct ssh_request *r, *n;
	LIST_HEAD(claimed);
	int pending;

	set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
	/*
	 * Ensure that the layer gets marked as shut down before actually
	 * stopping it. In combination with the check in ssh_rtl_submit(),
	 * this guarantees that no new requests can be added and all already
	 * queued requests are properly canceled.
	 */
	smp_mb__after_atomic();

	/* Remove requests from the queue. */
	spin_lock(&rtl->queue.lock);
	list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);

		list_move_tail(&r->node, &claimed);
	}
	spin_unlock(&rtl->queue.lock);

	/*
	 * The queue is now guaranteed to be empty and will stay empty, as no
	 * new requests can be submitted. This means ssh_rtl_tx_schedule()
	 * will not schedule tx.work any more, so cancel_work_sync() on it
	 * locks transmission down. After that, no more packets are submitted
	 * to the underlying packet layer, which can then be shut down as
	 * well, followed by the timeout reaper.
	 */
	cancel_work_sync(&rtl->tx.work);
	ssh_ptl_shutdown(&rtl->ptl);
	cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);

	/*
	 * Shutting down the packet layer should also have canceled all
	 * requests, so the pending set should be empty. Attempt to handle
	 * leftovers gracefully anyway, even though this should be dead code.
	 */
	pending = atomic_read(&rtl->pending.count);
	if (WARN_ON(pending)) {
		spin_lock(&rtl->pending.lock);
		list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
			set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
			/* Ensure state never gets zero. */
			smp_mb__before_atomic();
			clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);

			list_move_tail(&r->node, &claimed);
		}
		spin_unlock(&rtl->pending.lock);
	}

	/* Finally, cancel and complete the claimed requests. */
	list_for_each_entry_safe(r, n, &claimed, node) {
		/*
		 * We still need test_and_set() here as we may compete with
		 * cancellation.
		 */
		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			ssh_rtl_complete_with_status(r, -ESHUTDOWN);

		/*
		 * Drop the reference obtained by removing the request from
		 * the queue or pending set.
		 */
		list_del(&r->node);
		ssh_request_put(r);
	}
}