0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042 #include <linux/gcd.h>
0043 #include <linux/kernel.h>
0044 #include <linux/module.h>
0045 #include <linux/spinlock.h>
0046 #include <linux/interrupt.h>
0047 #include <linux/dma-mapping.h>
0048 #include <linux/io.h>
0049 #include <linux/slab.h>
0050 #include <linux/usb.h>
0051
0052 #include <linux/usb/hcd.h>
0053 #include <linux/usb/ch11.h>
0054
0055 #include "core.h"
0056 #include "hcd.h"
0057
0058
0059 #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
0060
0061
0062 #define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072 static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
0073 {
0074
0075
0076
0077
0078
0079 int status;
0080 int num_channels;
0081
0082 num_channels = hsotg->params.host_channels;
0083 if ((hsotg->periodic_channels + hsotg->non_periodic_channels <
0084 num_channels) && (hsotg->periodic_channels < num_channels - 1)) {
0085 status = 0;
0086 } else {
0087 dev_dbg(hsotg->dev,
0088 "%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
0089 __func__, num_channels,
0090 hsotg->periodic_channels, hsotg->non_periodic_channels);
0091 status = -ENOSPC;
0092 }
0093
0094 return status;
0095 }
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109 static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
0110 struct dwc2_qh *qh)
0111 {
0112 int status;
0113 s16 max_claimed_usecs;
0114
0115 status = 0;
0116
0117 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
0118
0119
0120
0121
0122 max_claimed_usecs = 100 - qh->host_us;
0123 } else {
0124
0125
0126
0127
0128 max_claimed_usecs = 900 - qh->host_us;
0129 }
0130
0131 if (hsotg->periodic_usecs > max_claimed_usecs) {
0132 dev_err(hsotg->dev,
0133 "%s: already claimed usecs %d, required usecs %d\n",
0134 __func__, hsotg->periodic_usecs, qh->host_us);
0135 status = -ENOSPC;
0136 }
0137
0138 return status;
0139 }
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
/**
 * pmap_schedule() - Reserve time in a periodic bitmap
 *
 * The bitmap is divided into @periods_in_map periods of @bits_per_period
 * bits each.  We reserve @num_bits contiguous bits in every @interval-th
 * period, starting the search at @start.
 *
 * @map:             The bitmap to schedule in
 * @bits_per_period: Number of bits in each period of the map
 * @periods_in_map:  Number of periods in the whole map
 * @num_bits:        Contiguous bits needed in each affected period
 * @interval:        How often (in periods) the reservation repeats
 * @start:           First bit offset to consider
 * @only_one_period: If true, only search within start's own period
 *
 * Return: the starting bit offset on success, -ENOSPC on failure.
 */
static int pmap_schedule(unsigned long *map, int bits_per_period,
			 int periods_in_map, int num_bits,
			 int interval, int start, bool only_one_period)
{
	int interval_bits;
	int to_reserve;
	int first_end;
	int i;

	if (num_bits > bits_per_period)
		return -ENOSPC;

	/* Adjust interval as per description; gcd keeps it a divisor */
	interval = gcd(interval, periods_in_map);

	interval_bits = bits_per_period * interval;
	to_reserve = periods_in_map / interval;

	/* If start has gotten us past interval then we can't schedule */
	if (start >= interval_bits)
		return -ENOSPC;

	if (only_one_period)
		/* Must fit within same period as start; end at begin of next */
		first_end = (start / bits_per_period + 1) * bits_per_period;
	else
		/* Can fit anywhere in the first interval */
		first_end = interval_bits;

	/*
	 * We'll try to pick the first repetition, then see if that time
	 * is free for each of the subsequent repetitions.  If it's not
	 * we'll adjust the start time for the next search of the first
	 * repetition.
	 */
	while (start + num_bits <= first_end) {
		int end;

		/* Need to stay within this period */
		end = (start / bits_per_period + 1) * bits_per_period;

		/* Look for num_bits us in this microframe starting at start */
		start = bitmap_find_next_zero_area(map, end, start, num_bits,
						   0);

		/*
		 * We should get start >= end if we fail.  We might be
		 * able to check the next microframe depending on the
		 * interval, so continue on (start already updated).
		 */
		if (start >= end) {
			start = end;
			continue;
		}

		/* At this point we have a valid point for first one */
		for (i = 1; i < to_reserve; i++) {
			int ith_start = start + interval_bits * i;
			int ith_end = end + interval_bits * i;
			int ret;

			/* Use this as a dumb "check if bits are 0" */
			ret = bitmap_find_next_zero_area(
				map, ith_start + num_bits, ith_start, num_bits,
				0);

			/* We got the right place, continue checking */
			if (ret == ith_start)
				continue;

			/* Move start up for next time and exit for loop */
			ith_start = bitmap_find_next_zero_area(
				map, ith_end, ith_start, num_bits, 0);
			if (ith_start >= ith_end)
				/* Need a while new period next time */
				start = end;
			else
				start = ith_start - interval_bits * i;
			break;
		}

		/* If didn't exit the for loop with a break, we have success */
		if (i == to_reserve)
			break;
	}

	if (start + num_bits > first_end)
		return -ENOSPC;

	for (i = 0; i < to_reserve; i++) {
		int ith_start = start + interval_bits * i;

		bitmap_set(map, ith_start, num_bits);
	}

	return start;
}
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
/**
 * pmap_unschedule() - Undo a reservation made by pmap_schedule()
 *
 * All arguments must mirror the matching pmap_schedule() call, with
 * @start being the value that call returned.
 *
 * @map:             The bitmap to release bits from
 * @bits_per_period: Number of bits in each period of the map
 * @periods_in_map:  Number of periods in the whole map
 * @num_bits:        Number of bits reserved in each affected period
 * @interval:        How often (in periods) the reservation repeats
 * @start:           Starting bit offset returned by pmap_schedule()
 */
static void pmap_unschedule(unsigned long *map, int bits_per_period,
			    int periods_in_map, int num_bits,
			    int interval, int start)
{
	int period_bits;
	int count;
	int idx;

	/* Normalize interval exactly the way pmap_schedule() did */
	interval = gcd(interval, periods_in_map);

	period_bits = bits_per_period * interval;
	count = periods_in_map / interval;

	for (idx = 0; idx < count; idx++)
		bitmap_clear(map, start + period_bits * idx, num_bits);
}
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374 static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
0375 struct dwc2_qh *qh)
0376 {
0377 unsigned long *map;
0378
0379
0380 if (WARN_ON(!qh->dwc_tt))
0381 return NULL;
0382
0383
0384 map = qh->dwc_tt->periodic_bitmaps;
0385 if (qh->dwc_tt->usb_tt->multi)
0386 map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
0387
0388 return map;
0389 }
0390
0391 #ifdef DWC2_PRINT_SCHEDULE
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
/**
 * cat_printf() - A printf() + strcat() helper
 *
 * This is useful for concatenating a bunch of strings where each string is
 * constructed using printf.
 *
 * @buf:   The destination buffer; will be updated to point after the
 *         printed data.
 * @size:  The number of bytes in the buffer (will be updated; reaches 0
 *         once the buffer is full, after which calls are no-ops).
 * @fmt:   The format for printf.
 * @...:   The args for printf.
 */
static __printf(3, 4)
void cat_printf(char **buf, size_t *size, const char *fmt, ...)
{
	va_list args;
	int i;

	/* Buffer already exhausted by an earlier call */
	if (*size == 0)
		return;

	va_start(args, fmt);
	i = vsnprintf(*buf, *size, fmt, args);
	va_end(args);

	/* vsnprintf() returns the would-be length; >= *size means truncated */
	if (i >= *size) {
		(*buf)[*size - 1] = '\0';
		*buf += *size;
		*size = 0;
	} else {
		*buf += i;
		*size -= i;
	}
}
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
/**
 * pmap_print() - Print the given periodic map
 *
 * Will attempt to print out the periodic schedule as ranges of set bits,
 * one line per period that has any bits set.
 *
 * @map:             See pmap_schedule().
 * @bits_per_period: See pmap_schedule().
 * @periods_in_map:  See pmap_schedule().
 * @period_name:     The name of 1 period, like "uFrame"
 * @units:           The name of the units, like "us".
 * @print_fn:        The function to call for printing.
 * @print_data:      Opaque data to pass to the print function.
 */
static void pmap_print(unsigned long *map, int bits_per_period,
		       int periods_in_map, const char *period_name,
		       const char *units,
		       void (*print_fn)(const char *str, void *data),
		       void *print_data)
{
	int period;

	for (period = 0; period < periods_in_map; period++) {
		char tmp[64];
		char *buf = tmp;
		size_t buf_size = sizeof(tmp);
		int period_start = period * bits_per_period;
		int period_end = period_start + bits_per_period;
		int start = 0;
		int count = 0;
		bool printed = false;
		int i;

		/* Run one past period_end so the final range is flushed */
		for (i = period_start; i < period_end + 1; i++) {
			/* Handle case when ith bit is set */
			if (i < period_end &&
			    bitmap_find_next_zero_area(map, i + 1,
						       i, 1, 0) != i) {
				if (count == 0)
					start = i - period_start;
				count++;
				continue;
			}

			/* ith bit isn't set; don't care if count == 0 */
			if (count == 0)
				continue;

			/* If count > 0 then we need to print a range */
			if (!printed)
				cat_printf(&buf, &buf_size, "%s %d: ",
					   period_name, period);
			else
				cat_printf(&buf, &buf_size, ", ");
			printed = true;

			cat_printf(&buf, &buf_size, "%d %s -%3d %s", start,
				   units, start + count - 1, units);
			count = 0;
		}

		if (printed)
			print_fn(tmp, print_data);
	}
}
0490
/* Context passed through pmap_print() to dwc2_qh_print() */
struct dwc2_qh_print_data {
	struct dwc2_hsotg *hsotg;	/* The HCD state structure */
	struct dwc2_qh *qh;		/* QH being printed */
};
0495
0496
0497
0498
0499
0500
0501
0502 static void dwc2_qh_print(const char *str, void *data)
0503 {
0504 struct dwc2_qh_print_data *print_data = data;
0505
0506 dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
0507 }
0508
0509
0510
0511
0512
0513
0514
/**
 * dwc2_qh_schedule_print() - Print the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH to print.
 */
static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
				   struct dwc2_qh *qh)
{
	struct dwc2_qh_print_data print_data = { hsotg, qh };
	int i;

	/*
	 * The printing functions are quite slow and inefficient.
	 * If we don't have tracing turned on, don't run unless the special
	 * define is turned on.
	 */

	if (qh->schedule_low_speed) {
		unsigned long *map = dwc2_get_ls_map(hsotg, qh);

		dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us",
			     qh, qh->device_us,
			     DWC2_ROUND_US_TO_SLICE(qh->device_us),
			     DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);

		if (map) {
			dwc2_sch_dbg(hsotg,
				     "QH=%p Whole low/full speed map %p now:\n",
				     qh, map);
			pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
				   DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices",
				   dwc2_qh_print, &print_data);
		}
	}

	for (i = 0; i < qh->num_hs_transfers; i++) {
		struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
		int uframe = trans_time->start_schedule_us /
			     DWC2_HS_PERIODIC_US_PER_UFRAME;
		int rel_us = trans_time->start_schedule_us %
			     DWC2_HS_PERIODIC_US_PER_UFRAME;

		dwc2_sch_dbg(hsotg,
			     "QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n",
			     qh, i, trans_time->duration_us, uframe, rel_us);
	}
	if (qh->num_hs_transfers) {
		dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
		pmap_print(hsotg->hs_periodic_bitmap,
			   DWC2_HS_PERIODIC_US_PER_UFRAME,
			   DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
			   dwc2_qh_print, &print_data);
	}
}
0564 #else
/*
 * Stub used when DWC2_PRINT_SCHEDULE is not defined: schedule printing
 * compiles away entirely.  Note: no trailing semicolon — the extra ';'
 * after the function body was a stray empty file-scope declaration
 * (a -Wpedantic warning).
 */
static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
					  struct dwc2_qh *qh) {}
0567 #endif
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584 static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
0585 int search_slice)
0586 {
0587 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
0588 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
0589 int slice;
0590
0591 if (!map)
0592 return -EINVAL;
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606 slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
0607 DWC2_LS_SCHEDULE_FRAMES, slices,
0608 qh->device_interval, search_slice, false);
0609
0610 if (slice < 0)
0611 return slice;
0612
0613 qh->ls_start_schedule_slice = slice;
0614 return 0;
0615 }
0616
0617
0618
0619
0620
0621
0622
0623 static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg,
0624 struct dwc2_qh *qh)
0625 {
0626 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
0627 unsigned long *map = dwc2_get_ls_map(hsotg, qh);
0628
0629
0630 if (!map)
0631 return;
0632
0633 pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
0634 DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
0635 qh->ls_start_schedule_slice);
0636 }
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658 static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
0659 bool only_one_period, int index)
0660 {
0661 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
0662 int us;
0663
0664 us = pmap_schedule(hsotg->hs_periodic_bitmap,
0665 DWC2_HS_PERIODIC_US_PER_UFRAME,
0666 DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
0667 qh->host_interval, trans_time->start_schedule_us,
0668 only_one_period);
0669
0670 if (us < 0)
0671 return us;
0672
0673 trans_time->start_schedule_us = us;
0674 return 0;
0675 }
0676
0677
0678
0679
0680
0681
0682
0683
0684 static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
0685 struct dwc2_qh *qh, int index)
0686 {
0687 struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
0688
0689 pmap_unschedule(hsotg->hs_periodic_bitmap,
0690 DWC2_HS_PERIODIC_US_PER_UFRAME,
0691 DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
0692 qh->host_interval, trans_time->start_schedule_us);
0693 }
0694
0695
0696
0697
0698
0699
0700
0701
0702
0703
0704
0705
0706
0707
/**
 * dwc2_uframe_schedule_split - Schedule a QH for a periodic split xfer.
 *
 * This is the most complicated thing in USB.  We have to find matching time
 * in both the low speed (TT) schedule and the high speed schedule: first we
 * place the transfer in the low speed map, then derive the high speed
 * start/split and complete/split transactions from that placement and try to
 * fit them into the high speed map.  If the high speed placement fails we
 * back out and retry the low speed placement further along.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 for success or an error code (-ENOSPC if no slot found).
 */
static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	int bytecount = qh->maxp_mult * qh->maxp;
	int ls_search_slice;
	int err = 0;
	int host_interval_in_sched;

	/*
	 * The interval (how often to repeat) in the actual host schedule.
	 * See pmap_schedule() for gcd() explanation.
	 */
	host_interval_in_sched = gcd(qh->host_interval,
				     DWC2_HS_SCHEDULE_UFRAMES);

	/*
	 * We always try to find space in the low speed schedule first, then
	 * try to find high speed time that matches.  If we don't, we'll bump
	 * up the place we start searching in the low speed schedule and try
	 * again.  To start we'll look right at the beginning of the low speed
	 * schedule.
	 */
	ls_search_slice = 0;

	while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) {
		int start_s_uframe;
		int ssplit_s_uframe;
		int second_s_uframe;
		int rel_uframe;
		int first_count;
		int middle_count;
		int end_count;
		int first_data_bytes;
		int other_data_bytes;
		int i;

		if (qh->schedule_low_speed) {
			err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);

			/*
			 * If we got an error here there's no other magic we
			 * can do, so bail.  All the looping above is only
			 * helpful to redo things if we got a low speed slot
			 * and then couldn't find a matching high speed slot.
			 */
			if (err)
				return err;
		} else {
			/* Must be missing the tt structure?  Why? */
			WARN_ON_ONCE(1);
		}

		/*
		 * This will give us a number 0 - 7 if
		 * DWC2_LS_SCHEDULE_FRAMES == 1, or 0 - 15 if == 2, or ...
		 */
		start_s_uframe = qh->ls_start_schedule_slice /
				 DWC2_SLICES_PER_UFRAME;

		/* Get a number that's always 0 - 7 */
		rel_uframe = (start_s_uframe % 8);

		/*
		 * If we were going to start in uframe 7 then we would need to
		 * issue a start split in uframe 6, which spec says is not OK.
		 * Move on to the next full frame (assuming there is one).
		 *
		 * See 11.18.4 Host Split Transaction Scheduling Requirements
		 * bullet 1.
		 */
		if (rel_uframe == 7) {
			if (qh->schedule_low_speed)
				dwc2_ls_pmap_unschedule(hsotg, qh);
			ls_search_slice =
				(qh->ls_start_schedule_slice /
				 DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) *
				DWC2_LS_PERIODIC_SLICES_PER_FRAME;
			continue;
		}

		/*
		 * For ISOC in:
		 * - start split            (frame -1)
		 * - complete split w/ data (frame +1)
		 * - complete split w/ data (frame +2)
		 * - ...
		 * - complete split w/ data (frame +num_data_packets)
		 * - complete split w/ data (frame +num_data_packets+1)
		 * - complete split w/ data (frame +num_data_packets+2, max 8)
		 *   ...though if frame was "0" then max is 7...
		 *
		 * For ISOC out we might need to do:
		 * - start split w/ data    (frame -1)
		 * - start split w/ data    (frame +0)
		 * - ...
		 * - start split w/ data    (frame +num_data_packets-2)
		 *
		 * For INTERRUPT in we might need to do:
		 * - start split            (frame -1)
		 * - complete split w/ data (frame +1)
		 * - complete split w/ data (frame +2)
		 * - complete split w/ data (frame +3, max 8)
		 *
		 * For INTERRUPT out we might need to do:
		 * - start split w/ data    (frame -1)
		 * - complete split         (frame +1)
		 * - complete split         (frame +2)
		 * - complete split         (frame +3, max 8)
		 *
		 * Start adjusting!
		 */
		ssplit_s_uframe = (start_s_uframe +
				   host_interval_in_sched - 1) %
				  host_interval_in_sched;
		if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
			second_s_uframe = start_s_uframe;
		else
			second_s_uframe = start_s_uframe + 1;

		/* First data transfer might not be all 188 bytes. */
		first_data_bytes = 188 -
			DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
					    DWC2_SLICES_PER_UFRAME),
				     DWC2_SLICES_PER_UFRAME);
		if (first_data_bytes > bytecount)
			first_data_bytes = bytecount;
		other_data_bytes = bytecount - first_data_bytes;

		/*
		 * For now, skip OUT xfers where first xfer is partial
		 *
		 * Main dwc2 code assumes:
		 * - INT transfers never get split in two.
		 * - ISOC transfers can always transfer 188 bytes the first
		 *   time.
		 *
		 * Until that code is fixed, try again if the first transfer
		 * couldn't transfer everything.
		 *
		 * This code can be removed if/when the rest of dwc2 handles
		 * the above cases.  Until it's fixed we just won't be able
		 * to schedule quite as tightly.
		 */
		if (!qh->ep_is_in &&
		    (first_data_bytes != min_t(int, 188, bytecount))) {
			dwc2_sch_dbg(hsotg,
				     "QH=%p avoiding broken 1st xfer (%d, %d)\n",
				     qh, first_data_bytes, bytecount);
			if (qh->schedule_low_speed)
				dwc2_ls_pmap_unschedule(hsotg, qh);
			ls_search_slice = (start_s_uframe + 1) *
				DWC2_SLICES_PER_UFRAME;
			continue;
		}

		/* Start by assuming transfers for the bytes */
		qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);

		/*
		 * Everything except ISOC OUT has extra transfers.  Rules are
		 * complicated.  See 11.18.4 Host Split Transaction Scheduling
		 * Requirements bullet 3.
		 */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
			if (rel_uframe == 6)
				qh->num_hs_transfers += 2;
			else
				qh->num_hs_transfers += 3;

			if (qh->ep_is_in) {
				/*
				 * First is start split, middle/end is data.
				 * Allocate full data bytes for all data.
				 */
				first_count = 4;
				middle_count = bytecount;
				end_count = bytecount;
			} else {
				/*
				 * First is data, middle/end is complete.
				 * First transfer and second can have data.
				 * Reserve full data bytes for the second.
				 */
				first_count = first_data_bytes;
				middle_count = max_t(int, 4, other_data_bytes);
				end_count = 4;
			}
		} else {
			if (qh->ep_is_in) {
				int last;

				/* Account for the start split */
				qh->num_hs_transfers++;

				/* Calculate "L" value from spec */
				last = rel_uframe + qh->num_hs_transfers + 1;

				/* Start with basic case */
				if (last <= 6)
					qh->num_hs_transfers += 2;
				else
					qh->num_hs_transfers += 1;

				/* Adjust downwards */
				if (last >= 6 && rel_uframe == 0)
					qh->num_hs_transfers--;

				/* 1st = start; rest can contain data */
				first_count = 4;
				middle_count = min_t(int, 188, bytecount);
				end_count = middle_count;
			} else {
				/* All contain data, last might be smaller */
				first_count = first_data_bytes;
				middle_count = min_t(int, 188,
						     other_data_bytes);
				end_count = other_data_bytes % 188;
			}
		}

		/* Assign durations per uFrame */
		qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
		for (i = 1; i < qh->num_hs_transfers - 1; i++)
			qh->hs_transfers[i].duration_us =
				HS_USECS_ISO(middle_count);
		if (qh->num_hs_transfers > 1)
			qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
				HS_USECS_ISO(end_count);

		/*
		 * Assign start us.  The call below to dwc2_hs_pmap_schedule()
		 * will start with these numbers but may adjust within the same
		 * microframe.
		 */
		qh->hs_transfers[0].start_schedule_us =
			ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME;
		for (i = 1; i < qh->num_hs_transfers; i++)
			qh->hs_transfers[i].start_schedule_us =
				((second_s_uframe + i - 1) %
				 DWC2_HS_SCHEDULE_UFRAMES) *
				DWC2_HS_PERIODIC_US_PER_UFRAME;

		/* Try to schedule with filled in hs_transfers above */
		for (i = 0; i < qh->num_hs_transfers; i++) {
			err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
			if (err)
				break;
		}

		/* If we scheduled all w/out breaking out then we're all good */
		if (i == qh->num_hs_transfers)
			break;

		for (; i >= 0; i--)
			dwc2_hs_pmap_unschedule(hsotg, qh, i);

		if (qh->schedule_low_speed)
			dwc2_ls_pmap_unschedule(hsotg, qh);

		/* Try again starting in next microframe */
		ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME;
	}

	if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES)
		return -ENOSPC;

	return 0;
}
0980
0981
0982
0983
0984
0985
0986
0987
0988
0989
/**
 * dwc2_uframe_schedule_hs - Schedule a QH for a periodic high speed xfer.
 *
 * Basically this just wraps dwc2_hs_pmap_schedule() to provide a clean
 * interface.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 for success or an error code.
 */
static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* In non-split host and device time are the same */
	WARN_ON(qh->host_us != qh->device_us);
	WARN_ON(qh->host_interval != qh->device_interval);
	WARN_ON(qh->num_hs_transfers != 1);

	/* We'll have one transfer; init start to 0 before calling scheduler */
	qh->hs_transfers[0].start_schedule_us = 0;
	qh->hs_transfers[0].duration_us = qh->host_us;

	return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
}
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
/**
 * dwc2_uframe_schedule_ls - Schedule a QH for a periodic low/full speed xfer.
 *
 * Basically this just wraps dwc2_ls_pmap_schedule() to provide a clean
 * interface.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 for success or an error code.
 */
static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* In non-split host and device time are the same */
	WARN_ON(qh->host_us != qh->device_us);
	WARN_ON(qh->host_interval != qh->device_interval);
	WARN_ON(!qh->schedule_low_speed);

	/* Run on the main low speed schedule (no split = no hub = no TT) */
	return dwc2_ls_pmap_schedule(hsotg, qh, 0);
}
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033 static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1034 {
1035 int ret;
1036
1037 if (qh->dev_speed == USB_SPEED_HIGH)
1038 ret = dwc2_uframe_schedule_hs(hsotg, qh);
1039 else if (!qh->do_split)
1040 ret = dwc2_uframe_schedule_ls(hsotg, qh);
1041 else
1042 ret = dwc2_uframe_schedule_split(hsotg, qh);
1043
1044 if (ret)
1045 dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
1046 else
1047 dwc2_qh_schedule_print(hsotg, qh);
1048
1049 return ret;
1050 }
1051
1052
1053
1054
1055
1056
1057
1058 static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1059 {
1060 int i;
1061
1062 for (i = 0; i < qh->num_hs_transfers; i++)
1063 dwc2_hs_pmap_unschedule(hsotg, qh, i);
1064
1065 if (qh->schedule_low_speed)
1066 dwc2_ls_pmap_unschedule(hsotg, qh);
1067
1068 dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
1069 }
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
/**
 * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
 *
 * Takes a qh that has already been scheduled (which means we know we have the
 * bandwidth reserved for us) and picks the first frame to start the transfer
 * on, setting qh->next_active_frame and qh->start_active_frame.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 */
static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u16 frame_number;
	u16 earliest_frame;
	u16 next_active_frame;
	u16 relative_frame;
	u16 interval;

	/*
	 * Use the real frame number rather than the cached value as of the
	 * last SOF to give us a little extra slop.
	 */
	frame_number = dwc2_hcd_get_frame_number(hsotg);

	/*
	 * We wouldn't want to start any earlier than the next frame just in
	 * case the frame number ticks as we're doing this calculation.
	 *
	 * NOTE: if we could quantify how long till we actually get scheduled
	 * we might be able to avoid the "+ 1" by looking at the upper part of
	 * HFNUM (the FRREM field).  For now we'll just use the + 1 though.
	 */
	earliest_frame = dwc2_frame_num_inc(frame_number, 1);
	next_active_frame = earliest_frame;

	/* Get the "no microframe scheduler" out of the way... */
	if (!hsotg->params.uframe_sched) {
		if (qh->do_split)
			/* Splits are active at microframe 0 minus 1 */
			next_active_frame |= 0x7;
		goto exit;
	}

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * We're either at high speed or we're doing a split (which
		 * means we're talking high speed to a hub).  In any case
		 * the first frame should be based on when the first scheduled
		 * event is.
		 */
		WARN_ON(qh->num_hs_transfers < 1);

		relative_frame = qh->hs_transfers[0].start_schedule_us /
				 DWC2_HS_PERIODIC_US_PER_UFRAME;

		/* Adjust interval as per high speed schedule */
		interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);

	} else {
		/*
		 * Low or full speed directly on dwc2.  Just about the same
		 * as high speed but on a different schedule and with slightly
		 * different adjustments.  Note that this works because when
		 * the host and device are both low speed then frames in the
		 * controller tick at low speed.
		 */
		relative_frame = qh->ls_start_schedule_slice /
				 DWC2_LS_PERIODIC_SLICES_PER_FRAME;
		interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
	}

	/* Scheduler messed up if frame is past interval */
	WARN_ON(relative_frame >= interval);

	/*
	 * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've
	 * done the gcd(), so it's safe to move to the beginning of the current
	 * interval like this.
	 *
	 * After this we might be before earliest_frame, but don't worry,
	 * we'll fix it...
	 */
	next_active_frame = (next_active_frame / interval) * interval;

	/*
	 * Actually choose to start at the frame number we've been
	 * scheduled for.
	 */
	next_active_frame = dwc2_frame_num_inc(next_active_frame,
					       relative_frame);

	/*
	 * We actually need 1 frame before since the next_active_frame is
	 * the frame number we'll be put on the ready list and we won't be on
	 * the bus until 1 frame later.
	 */
	next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);

	/*
	 * By now we might actually be before the earliest_frame.  Let's move
	 * up intervals until we're not.
	 */
	while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
		next_active_frame = dwc2_frame_num_inc(next_active_frame,
						       interval);

exit:
	qh->next_active_frame = next_active_frame;
	qh->start_active_frame = next_active_frame;

	dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
		      qh, frame_number, qh->next_active_frame);
}
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
/**
 * dwc2_do_reserve() - Make a periodic reservation
 *
 * Try to allocate space in the periodic schedule.  Depending on parameters
 * this might use the microframe scheduler or the dumb scheduler.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer.
 *
 * Returns: 0 upon success; error upon failure.
 */
static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->params.uframe_sched) {
		status = dwc2_uframe_schedule(hsotg, qh);
	} else {
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	if (!hsotg->params.uframe_sched)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->host_us;

	dwc2_pick_first_frame(hsotg, qh);

	return 0;
}
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
/**
 * dwc2_do_unreserve() - Actually release the periodic reservation
 *
 * This function actually releases the periodic bandwidth that was reserved
 * by the given qh.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer.
 */
static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	assert_spin_locked(&hsotg->lock);

	WARN_ON(!qh->unreserve_pending);

	/* No more unreserve pending--we're doing it */
	qh->unreserve_pending = false;

	if (WARN_ON(!list_empty(&qh->qh_list_entry)))
		list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->host_us;

	if (hsotg->params.uframe_sched) {
		dwc2_uframe_unschedule(hsotg, qh);
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
/**
 * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
 *
 * According to the kernel doc for usb_submit_urb() (specifically the part
 * about reserved bandwidth), we need to keep a reservation active as long as
 * a device driver keeps submitting.  Since we're using HCD_BH to give back
 * the URB we need to give the driver a little bit of time before we release
 * the reservation.  This worker is called after the appropriate delay.
 *
 * @t: Address to a qh unreserve_work.
 */
static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
	struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	/*
	 * Wait for the lock, or for us to be scheduled again.  We
	 * could be scheduled again if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 * - The timer has been kicked again.
	 * In that case cancel and wait for the next call.
	 */
	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
		if (timer_pending(&qh->unreserve_timer))
			return;
	}

	/*
	 * Might be no more unreserve pending if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 *
	 * We can't put this in the loop above because unreserve_pending needs
	 * to be accessed under lock, so we can only check it once we got the
	 * lock.
	 */
	if (qh->unreserve_pending)
		dwc2_do_unreserve(hsotg, qh);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328 static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
1329 struct dwc2_qh *qh)
1330 {
1331 u32 max_xfer_size;
1332 u32 max_channel_xfer_size;
1333 int status = 0;
1334
1335 max_xfer_size = qh->maxp * qh->maxp_mult;
1336 max_channel_xfer_size = hsotg->params.max_transfer_size;
1337
1338 if (max_xfer_size > max_channel_xfer_size) {
1339 dev_err(hsotg->dev,
1340 "%s: Periodic xfer length %d > max xfer length for channel %d\n",
1341 __func__, max_xfer_size, max_channel_xfer_size);
1342 status = -ENOSPC;
1343 }
1344
1345 return status;
1346 }
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	/* Cancel pending unreserve; if canceled OK, unreserve was pending */
	if (del_timer(&qh->unreserve_timer))
		WARN_ON(!qh->unreserve_pending);

	/*
	 * Only need to reserve if there's not an unreserve pending, since if an
	 * unreserve is pending then by definition our old reservation is still
	 * valid.  Unreserve might still be pending even if we didn't cancel if
	 * dwc2_unreserve_timer_fn() already started.  Code in the timer handles
	 * that case.
	 */
	if (!qh->unreserve_pending) {
		status = dwc2_do_reserve(hsotg, qh);
		if (status)
			return status;
	} else {
		/*
		 * It might have been a while, so make sure that frame_number
		 * is still good.  Note: we could also try to use the similar
		 * dwc2_next_periodic_start() but that schedules much more
		 * tightly and we might need to hurry and queue things up.
		 */
		if (dwc2_frame_num_le(qh->next_active_frame,
				      hsotg->frame_number))
			dwc2_pick_first_frame(hsotg, qh);
	}

	qh->unreserve_pending = 0;

	if (hsotg->params.dma_desc_enable)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	return 0;
}
1409
1410
1411
1412
1413
1414
1415
1416
/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	bool did_modify;

	assert_spin_locked(&hsotg->lock);

	/*
	 * Schedule the unreserve to happen in a little bit.  Cases here:
	 * - Unreserve worker might be sitting there waiting to grab the lock.
	 *   In this case it will notice it's been scheduled again and will
	 *   quit.
	 * - Unreserve worker might not be scheduled.
	 *
	 * We should never already be scheduled since dwc2_schedule_periodic()
	 * should have canceled the scheduled unreserve timer (hence the
	 * warning on did_modify).
	 *
	 * We add + 1 to the timer to guarantee that at least 1 jiffy has
	 * passed (otherwise if the jiffy counter might tick right after we
	 * read it and we'll get no delay).
	 */
	did_modify = mod_timer(&qh->unreserve_timer,
			       jiffies + DWC2_UNRESERVE_DELAY + 1);
	WARN_ON(did_modify);
	qh->unreserve_pending = 1;

	list_del_init(&qh->qh_list_entry);
}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
/**
 * dwc2_wait_timer_fn() - Timer function to re-queue after waiting
 *
 * As per the spec, a NAK indicates that "a function is temporarily unable to
 * transmit or receive data, but will eventually be able to do so without need
 * of host intervention".
 *
 * That means that when we encounter a NAK we're supposed to retry.  Rather
 * than hammering the device over and over, this timer moves the QH back to
 * the "inactive" list after a short delay so transfers get retried.
 *
 * @t: Pointer to wait_timer in a qh.
 *
 * Return: HRTIMER_NORESTART to not automatically rearm the timer.
 */
static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
{
	struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	/*
	 * We'll set wait_timer_cancel when the QH is removed from the
	 * schedule; if that happened while the timer was queued, do nothing.
	 */
	if (!qh->wait_timer_cancel) {
		enum dwc2_transaction_type tr_type;

		qh->want_wait = false;

		list_move(&qh->qh_list_entry,
			  &hsotg->non_periodic_sched_inactive);

		tr_type = dwc2_hcd_select_transactions(hsotg);
		if (tr_type != DWC2_TRANSACTION_NONE)
			dwc2_hcd_queue_transactions(hsotg, tr_type);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return HRTIMER_NORESTART;
}
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @qh:        The QH to init
 * @urb:       Holds the information about the device/endpoint needed to
 *             initialize the QH
 * @mem_flags: Flags for allocating memory.
 */
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb, gfp_t mem_flags)
{
	int dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
	u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
	bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
	bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
	u32 hprt = dwc2_readl(hsotg, HPRT0);
	u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
	/* Need a split when port runs high speed but device doesn't */
	bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
			 dev_speed != USB_SPEED_HIGH);
	int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
	int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
	int bytecount = maxp_mult * maxp;
	char *speed, *type;

	/* Initialize QH */
	qh->hsotg = hsotg;
	timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
	hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	qh->wait_timer.function = &dwc2_wait_timer_fn;
	qh->ep_type = ep_type;
	qh->ep_is_in = ep_is_in;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = maxp;
	qh->maxp_mult = maxp_mult;
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	qh->do_split = do_split;
	qh->dev_speed = dev_speed;

	if (ep_is_int || ep_is_isoc) {
		/* Compute scheduling parameters once and save them */
		int host_speed = do_split ? USB_SPEED_HIGH : dev_speed;
		struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv,
							       mem_flags,
							       &qh->ttport);
		int device_ns;

		qh->dwc_tt = dwc_tt;

		qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
				       ep_is_isoc, bytecount));
		device_ns = usb_calc_bus_time(dev_speed, ep_is_in,
					      ep_is_isoc, bytecount);

		/* Account for the TT's think time on splits */
		if (do_split && dwc_tt)
			device_ns += dwc_tt->usb_tt->think_time;
		qh->device_us = NS_TO_US(device_ns);

		qh->device_interval = urb->interval;
		qh->host_interval = urb->interval * (do_split ? 8 : 1);

		/*
		 * Schedule low speed if we're running the host in low or
		 * full speed OR if we've got a "TT" to deal with to access
		 * this device.
		 */
		qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
					 dwc_tt;

		if (do_split) {
			/* We won't know num transfers until we schedule */
			qh->num_hs_transfers = -1;
		} else if (dev_speed == USB_SPEED_HIGH) {
			qh->num_hs_transfers = 1;
		} else {
			qh->num_hs_transfers = 0;
		}

		/* We'll schedule later when we have something to do */
	}

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
		     speed, bytecount);
	dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
		     dwc2_hcd_get_dev_addr(&urb->pipe_info),
		     dwc2_hcd_get_ep_num(&urb->pipe_info),
		     ep_is_in ? "IN" : "OUT");
	if (ep_is_int || ep_is_isoc) {
		dwc2_sch_dbg(hsotg,
			     "QH=%p ...duration: host=%d us, device=%d us\n",
			     qh, qh->host_us, qh->device_us);
		dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n",
			     qh, qh->host_interval, qh->device_interval);
		if (qh->schedule_low_speed)
			dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n",
				     qh, dwc2_get_ls_map(hsotg, qh));
	}
}
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645 struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
1646 struct dwc2_hcd_urb *urb,
1647 gfp_t mem_flags)
1648 {
1649 struct dwc2_qh *qh;
1650
1651 if (!urb->priv)
1652 return NULL;
1653
1654
1655 qh = kzalloc(sizeof(*qh), mem_flags);
1656 if (!qh)
1657 return NULL;
1658
1659 dwc2_qh_init(hsotg, qh, urb, mem_flags);
1660
1661 if (hsotg->params.dma_desc_enable &&
1662 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
1663 dwc2_hcd_qh_free(hsotg, qh);
1664 return NULL;
1665 }
1666
1667 return qh;
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupt disabled or spinlock held
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Make sure any unreserve work is finished. */
	if (del_timer_sync(&qh->unreserve_timer)) {
		unsigned long flags;

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_do_unreserve(hsotg, qh);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	/*
	 * We don't have the lock so we can safely wait until the wait timer
	 * finishes.  Of course, at this point in time we'd better have set
	 * wait_timer_active to false so if this timer was still pending it
	 * won't do anything anyway, but we want it to finish before we free
	 * memory.
	 */
	hrtimer_cancel(&qh->wait_timer);

	dwc2_host_put_tt_info(hsotg, qh->dwc_tt);

	if (qh->desc_list)
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	else if (hsotg->unaligned_cache && qh->dw_align_buf)
		kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);

	kfree(qh);
}
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;
	ktime_t delay;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Schedule right away */
		qh->start_active_frame = hsotg->frame_number;
		qh->next_active_frame = qh->start_active_frame;

		if (qh->want_wait) {
			/* Wait a bit before retrying after NAKs */
			list_add_tail(&qh->qh_list_entry,
				      &hsotg->non_periodic_sched_waiting);
			qh->wait_timer_cancel = false;
			delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
			hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
		} else {
			list_add_tail(&qh->qh_list_entry,
				      &hsotg->non_periodic_sched_inactive);
		}
		return 0;
	}

	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
	if (!hsotg->periodic_qh_count) {
		/* First periodic QH: enable SOF interrupts */
		intr_mask = dwc2_readl(hsotg, GINTMSK);
		intr_mask |= GINTSTS_SOF;
		dwc2_writel(hsotg, intr_mask, GINTMSK);
	}
	hsotg->periodic_qh_count++;

	return 0;
}
1765
1766
1767
1768
1769
1770
1771
1772
1773 void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
1774 {
1775 u32 intr_mask;
1776
1777 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1778
1779
1780 qh->wait_timer_cancel = true;
1781
1782 if (list_empty(&qh->qh_list_entry))
1783
1784 return;
1785
1786 if (dwc2_qh_is_non_per(qh)) {
1787 if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
1788 hsotg->non_periodic_qh_ptr =
1789 hsotg->non_periodic_qh_ptr->next;
1790 list_del_init(&qh->qh_list_entry);
1791 return;
1792 }
1793
1794 dwc2_deschedule_periodic(hsotg, qh);
1795 hsotg->periodic_qh_count--;
1796 if (!hsotg->periodic_qh_count &&
1797 !hsotg->params.dma_desc_enable) {
1798 intr_mask = dwc2_readl(hsotg, GINTMSK);
1799 intr_mask &= ~GINTSTS_SOF;
1800 dwc2_writel(hsotg, intr_mask, GINTMSK);
1801 }
1802 }
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823 static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
1824 struct dwc2_qh *qh, u16 frame_number)
1825 {
1826 u16 old_frame = qh->next_active_frame;
1827 u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
1828 int missed = 0;
1829 u16 incr;
1830
1831
1832
1833
1834
1835
1836
1837 if (old_frame == qh->start_active_frame &&
1838 !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
1839 incr = 2;
1840 else
1841 incr = 1;
1842
1843 qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853 if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
1854
1855
1856
1857
1858 missed = dwc2_frame_num_dec(prev_frame_number,
1859 qh->next_active_frame);
1860 qh->next_active_frame = frame_number;
1861 }
1862
1863 return missed;
1864 }
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
/**
 * dwc2_next_periodic_start() - Pick start_active_frame/next_active_frame
 * for the next period of a periodic transfer.
 *
 * @hsotg:        The HCD state structure
 * @qh:           QH for the periodic transfer
 * @frame_number: The current frame number
 *
 * Advances start_active_frame by one interval, then — when the interval
 * is small enough for reliable wrap-around comparison — detects whether
 * that frame has already passed and, if so, catches up in gcd(interval,
 * schedule length) steps so the QH stays aligned with its reserved slot
 * in the periodic schedule map.
 *
 * Return: number of (micro)frames skipped to catch up, 0 if on schedule.
 */
static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 frame_number)
{
	int missed = 0;
	u16 interval = qh->host_interval;
	u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);

	/* Nominal next start: one full interval after the previous one. */
	qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
						    interval);

	/*
	 * NOTE(review): for very large intervals the wrap-around
	 * comparison below (dwc2_frame_num_gt) can no longer tell
	 * "behind" from "ahead", so the catch-up logic is skipped
	 * entirely — presumably 0x1000 is the safe threshold relative
	 * to the frame-counter width; confirm against dwc2 frame macros.
	 */
	if (interval >= 0x1000)
		goto exit;

	/*
	 * Catch-up case: either we haven't advanced at all (interval
	 * arithmetic wrapped us back onto next_active_frame) or the new
	 * start frame is already in the past relative to the previous
	 * frame number.
	 */
	if (qh->start_active_frame == qh->next_active_frame ||
	    dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
		u16 ideal_start = qh->start_active_frame;
		int periods_in_map;

		/*
		 * Schedule-map length depends on whether we reserved in
		 * the high-speed (microframe) or low/full-speed (frame)
		 * map.
		 */
		if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
			periods_in_map = DWC2_HS_SCHEDULE_UFRAMES;
		else
			periods_in_map = DWC2_LS_SCHEDULE_FRAMES;
		/*
		 * Stepping by gcd(interval, map length) keeps
		 * start_active_frame congruent with the slot reserved in
		 * the periodic bitmap while letting us catch up faster
		 * than one interval at a time would allow.
		 */
		interval = gcd(interval, periods_in_map);

		/* Step forward until we're no longer in the past. */
		do {
			qh->start_active_frame = dwc2_frame_num_inc(
				qh->start_active_frame, interval);
		} while (dwc2_frame_num_gt(prev_frame_number,
					   qh->start_active_frame));

		/* Report how far past the ideal start we ended up. */
		missed = dwc2_frame_num_dec(qh->start_active_frame,
					    ideal_start);
	}

exit:
	qh->next_active_frame = qh->start_active_frame;

	return missed;
}
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977 void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1978 int sched_next_periodic_split)
1979 {
1980 u16 old_frame = qh->next_active_frame;
1981 u16 frame_number;
1982 int missed;
1983
1984 if (dbg_qh(qh))
1985 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1986
1987 if (dwc2_qh_is_non_per(qh)) {
1988 dwc2_hcd_qh_unlink(hsotg, qh);
1989 if (!list_empty(&qh->qtd_list))
1990
1991 dwc2_hcd_qh_add(hsotg, qh);
1992 return;
1993 }
1994
1995
1996
1997
1998
1999
2000
2001 frame_number = dwc2_hcd_get_frame_number(hsotg);
2002
2003 if (sched_next_periodic_split)
2004 missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
2005 else
2006 missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
2007
2008 dwc2_sch_vdbg(hsotg,
2009 "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
2010 qh, sched_next_periodic_split, frame_number, old_frame,
2011 qh->next_active_frame,
2012 dwc2_frame_num_dec(qh->next_active_frame, old_frame),
2013 missed, missed ? "MISS" : "");
2014
2015 if (list_empty(&qh->qtd_list)) {
2016 dwc2_hcd_qh_unlink(hsotg, qh);
2017 return;
2018 }
2019
2020
2021
2022
2023
2024
2025
2026
2027 if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
2028 list_move_tail(&qh->qh_list_entry,
2029 &hsotg->periodic_sched_ready);
2030 else
2031 list_move_tail(&qh->qh_list_entry,
2032 &hsotg->periodic_sched_inactive);
2033 }
2034
2035
2036
2037
2038
2039
2040
2041 void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
2042 {
2043 qtd->urb = urb;
2044 if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
2045 USB_ENDPOINT_XFER_CONTROL) {
2046
2047
2048
2049
2050
2051 qtd->data_toggle = DWC2_HC_PID_DATA1;
2052 qtd->control_phase = DWC2_CONTROL_SETUP;
2053 }
2054
2055
2056 qtd->complete_split = 0;
2057 qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
2058 qtd->isoc_split_offset = 0;
2059 qtd->in_process = 0;
2060
2061
2062 urb->qtd = qtd;
2063 }
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078 int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
2079 struct dwc2_qh *qh)
2080 {
2081 int retval;
2082
2083 if (unlikely(!qh)) {
2084 dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
2085 retval = -EINVAL;
2086 goto fail;
2087 }
2088
2089 retval = dwc2_hcd_qh_add(hsotg, qh);
2090 if (retval)
2091 goto fail;
2092
2093 qtd->qh = qh;
2094 list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
2095
2096 return 0;
2097 fail:
2098 return retval;
2099 }