// SPDX-License-Identifier: GPL-2.0
/*
 * udc.c - ChipIdea UDC driver
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg-fsm.h>
#include <linux/usb/chipidea.h>

#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "otg.h"
#include "otg_fsm.h"
#include "trace.h"
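
/* control endpoint description */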
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
		       struct td_node *node);
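
/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns the bit number (TX endpoints live in the upper
 * halfword of the endpoint registers)
 */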
static inline int hw_ep_bit(int num, int dir)
{
	return num + ((dir == TX) ? 16 : 0);
}

static inline int ep_to_bit(struct ci_hdrc *ci, int n)
{
	int fill = 16 - ci->hw_ep_max / 2;

	if (n >= ci->hw_ep_max / 2)
		n += fill;

	return n;
}
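
/**
 * hw_device_state: enables/disables interrupts (execute without interruption)
 * @ci: the controller
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * This function returns an error code
 */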
static int hw_device_state(struct ci_hdrc *ci, u32 dma)
{
	if (dma) {
		hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
		/* interrupt, error, port change, reset, sleep/suspend */
		hw_write(ci, OP_USBINTR, ~0,
			 USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
	} else {
		hw_write(ci, OP_USBINTR, ~0, 0);
	}
	return 0;
}
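
/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */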
static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
		while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));

	return 0;
}

/**
 * hw_ep_disable: disables endpoint (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
{
	hw_write(ci, OP_ENDPTCTRL + num,
		 (dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
	return 0;
}
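
/**
 * hw_ep_enable: enables endpoint (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @type: endpoint type
 *
 * This function returns an error code
 */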
static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
{
	u32 mask, data;

	if (dir == TX) {
		mask  = ENDPTCTRL_TXT;	/* type */
		data  = type << __ffs(mask);

		mask |= ENDPTCTRL_TXS;	/* unstall */
		mask |= ENDPTCTRL_TXR;	/* reset data toggle */
		data |= ENDPTCTRL_TXR;
		mask |= ENDPTCTRL_TXE;	/* enable */
		data |= ENDPTCTRL_TXE;
	} else {
		mask  = ENDPTCTRL_RXT;	/* type */
		data  = type << __ffs(mask);

		mask |= ENDPTCTRL_RXS;	/* unstall */
		mask |= ENDPTCTRL_RXR;	/* reset data toggle */
		data |= ENDPTCTRL_RXR;
		mask |= ENDPTCTRL_RXE;	/* enable */
		data |= ENDPTCTRL_RXE;
	}
	hw_write(ci, OP_ENDPTCTRL + num, mask, data);
	return 0;
}

/**
 * hw_ep_get_halt: return endpoint halt status
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns 1 if endpoint halted
 */
static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
{
	u32 mask = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;

	return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
}
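
/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code
 */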
static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	/* Synchronize before ep prime */
	wmb();

	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));

	while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	return 0;
}
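
/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 *                 without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @value: true => stall, false => unstall
 *
 * This function returns an error code
 */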
static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		enum ci_hw_regs reg = OP_ENDPTCTRL + num;
		u32 mask_xs = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = (dir == TX) ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* stall sets the halt bit; unstall resets the data toggle */
		hw_write(ci, reg, mask_xs|mask_xr,
			 value ? mask_xs : mask_xr);
	} while (value != hw_ep_get_halt(ci, num, dir));

	return 0;
}

/**
 * hw_port_is_high_speed: test if port is high speed
 * @ci: the controller
 *
 * This function returns true if high speed port
 */
static int hw_port_is_high_speed(struct ci_hdrc *ci)
{
	return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
		hw_read(ci, OP_PORTSC, PORTSC_HSP);
}

/**
 * hw_test_and_clear_complete: test & clear complete status (execute without
 *                             interruption)
 * @ci: the controller
 * @n: endpoint number
 *
 * This function returns complete status
 */
static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
{
	n = ep_to_bit(ci, n);
	return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
}

/**
 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
 *                                without interruption)
 * @ci: the controller
 *
 * This function returns active interrupts
 */
static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
{
	u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);

	hw_write(ci, OP_USBSTS, ~0, reg);
	return reg;
}

/**
 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 *                                interruption)
 * @ci: the controller
 *
 * This function returns guard value
 */
static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
{
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
}

/**
 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 *                              interruption)
 * @ci: the controller
 *
 * This function returns guard value
 */
static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
{
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}

/**
 * hw_usb_set_address: configures USB address (execute without interruption)
 * @ci: the controller
 * @value: new USB address
 *
 * This function sets the address immediately, without using the hardware
 * "address advance" feature.
 */
static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
{
	hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
		 value << __ffs(DEVICEADDR_USBADR));
}
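
/**
 * hw_usb_reset: restart device after a bus reset (execute without
 *               interruption)
 * @ci: the controller
 *
 * This function returns an error code
 */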
static int hw_usb_reset(struct ci_hdrc *ci)
{
	hw_usb_set_address(ci, 0);

	/* flush all endpoints */
	hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);

	/* clear setup token semaphores */
	hw_write(ci, OP_ENDPTSETUPSTAT, 0, 0);

	/* clear complete status */
	hw_write(ci, OP_ENDPTCOMPLETE, 0, 0);

	/* wait until all bits cleared */
	while (hw_read(ci, OP_ENDPTPRIME, ~0))
		udelay(10);		/* not RTOS friendly */

	return 0;
}
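
/******************************************************************************
 * UTIL block
 *****************************************************************************/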
static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
			  unsigned int length, struct scatterlist *s)
{
	int i;
	u32 temp;
	struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
						  GFP_ATOMIC);

	if (node == NULL)
		return -ENOMEM;

	node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
	if (node->ptr == NULL) {
		kfree(node);
		return -ENOMEM;
	}

	node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
	node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
	node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
		u32 mul = hwreq->req.length / hwep->ep.maxpacket;

		if (hwreq->req.length == 0
				|| hwreq->req.length % hwep->ep.maxpacket)
			mul++;
		node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
	}

	if (s) {
		temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
		node->td_remaining_size = CI_MAX_BUF_SIZE - length;
	} else {
		temp = (u32) (hwreq->req.dma + hwreq->req.actual);
	}

	if (length) {
		node->ptr->page[0] = cpu_to_le32(temp);
		for (i = 1; i < TD_PAGE_COUNT; i++) {
			u32 page = temp + i * CI_HDRC_PAGE_SIZE;

			page &= ~TD_RESERVED_MASK;
			node->ptr->page[i] = cpu_to_le32(page);
		}
	}

	hwreq->req.actual += length;

	if (!list_empty(&hwreq->tds)) {
		/* get the last entry */
		lastnode = list_entry(hwreq->tds.prev,
				      struct td_node, td);
		lastnode->ptr->next = cpu_to_le32(node->dma);
	}

	INIT_LIST_HEAD(&node->td);
	list_add_tail(&node->td, &hwreq->tds);

	return 0;
}

/**
 * _usb_addr: calculates endpoint address from direction & number
 * @ep: endpoint
 */
static inline u8 _usb_addr(struct ci_hw_ep *ep)
{
	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}

static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
				 struct ci_hw_req *hwreq)
{
	unsigned int rest = hwreq->req.length;
	int pages = TD_PAGE_COUNT;
	int ret = 0;

	if (rest == 0) {
		ret = add_td_to_list(hwep, hwreq, 0, NULL);
		if (ret < 0)
			return ret;
	}

	/*
	 * The first buffer could be not page aligned.
	 * In that case we have to span into one extra td.
	 */
	if (hwreq->req.dma % PAGE_SIZE)
		pages--;

	while (rest > 0) {
		unsigned int count = min(hwreq->req.length - hwreq->req.actual,
			(unsigned int)(pages * CI_HDRC_PAGE_SIZE));

		ret = add_td_to_list(hwep, hwreq, count, NULL);
		if (ret < 0)
			return ret;

		rest -= count;
	}

	if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
	    && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
		ret = add_td_to_list(hwep, hwreq, 0, NULL);
		if (ret < 0)
			return ret;
	}

	return ret;
}

static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
			     struct scatterlist *s)
{
	unsigned int rest = sg_dma_len(s);
	int ret = 0;

	hwreq->req.actual = 0;
	while (rest > 0) {
		unsigned int count = min_t(unsigned int, rest,
					   CI_MAX_BUF_SIZE);

		ret = add_td_to_list(hwep, hwreq, count, s);
		if (ret < 0)
			return ret;

		rest -= count;
	}

	return ret;
}

static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
{
	int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
			/ CI_HDRC_PAGE_SIZE;
	int i;
	u32 token;

	token = le32_to_cpu(node->ptr->token) + (sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
	node->ptr->token = cpu_to_le32(token);

	for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
		u32 page = (u32) sg_dma_address(s) +
			(i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;

		page &= ~TD_RESERVED_MASK;
		node->ptr->page[i] = cpu_to_le32(page);
	}
}

static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	struct usb_request *req = &hwreq->req;
	struct scatterlist *s = req->sg;
	int ret = 0, i = 0;
	struct td_node *node = NULL;

	if (!s || req->zero || req->length == 0) {
		dev_err(hwep->ci->dev, "not supported operation for sg\n");
		return -EINVAL;
	}

	while (i++ < req->num_mapped_sgs) {
		if (sg_dma_address(s) % PAGE_SIZE) {
			dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
			return -EINVAL;
		}

		if (node && (node->td_remaining_size >= sg_dma_len(s))) {
			ci_add_buffer_entry(node, s);
			node->td_remaining_size -= sg_dma_len(s);
		} else {
			ret = prepare_td_per_sg(hwep, hwreq, s);
			if (ret)
				return ret;

			node = list_entry(hwreq->tds.prev,
					  struct td_node, td);
		}

		s = sg_next(s);
	}

	return ret;
}
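
/**
 * _hardware_enqueue: configures a request at hardware level
 * @hwep:  endpoint
 * @hwreq: request
 *
 * This function returns an error code
 */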
static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	struct ci_hdrc *ci = hwep->ci;
	int ret = 0;
	struct td_node *firstnode, *lastnode;

	/* don't queue twice */
	if (hwreq->req.status == -EALREADY)
		return -EALREADY;

	hwreq->req.status = -EALREADY;

	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
					    &hwreq->req, hwep->dir);
	if (ret)
		return ret;

	if (hwreq->req.num_mapped_sgs)
		ret = prepare_td_for_sg(hwep, hwreq);
	else
		ret = prepare_td_for_non_sg(hwep, hwreq);

	if (ret)
		return ret;

	lastnode = list_entry(hwreq->tds.prev,
			      struct td_node, td);

	lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
	if (!hwreq->req.no_interrupt)
		lastnode->ptr->token |= cpu_to_le32(TD_IOC);

	list_for_each_entry_safe(firstnode, lastnode, &hwreq->tds, td)
		trace_ci_prepare_td(hwep, hwreq, firstnode);

	firstnode = list_first_entry(&hwreq->tds, struct td_node, td);

	wmb();

	hwreq->req.actual = 0;
	if (!list_empty(&hwep->qh.queue)) {
		struct ci_hw_req *hwreqprev;
		int n = hw_ep_bit(hwep->num, hwep->dir);
		int tmp_stat;
		struct td_node *prevlastnode;
		u32 next = firstnode->dma & TD_ADDR_MASK;

		hwreqprev = list_entry(hwep->qh.queue.prev,
				       struct ci_hw_req, queue);
		prevlastnode = list_entry(hwreqprev->tds.prev,
					  struct td_node, td);

		prevlastnode->ptr->next = cpu_to_le32(next);
		wmb();

		if (ci->rev == CI_REVISION_22) {
			if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
				reprime_dtd(ci, hwep, prevlastnode);
		}

		if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
			goto done;
		do {
			hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
		hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
		if (tmp_stat)
			goto done;
	}

	/* QH configuration */
	hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));

	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
		u32 mul = hwreq->req.length / hwep->ep.maxpacket;

		if (hwreq->req.length == 0
				|| hwreq->req.length % hwep->ep.maxpacket)
			mul++;
		hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
	}

	ret = hw_ep_prime(ci, hwep->num, hwep->dir,
			  hwep->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}

/**
 * free_pending_td: remove a pending request for the endpoint
 * @hwep: endpoint
 */
static void free_pending_td(struct ci_hw_ep *hwep)
{
	struct td_node *pending = hwep->pending_td;

	dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
	hwep->pending_td = NULL;
	kfree(pending);
}

static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
		       struct td_node *node)
{
	hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));

	return hw_ep_prime(ci, hwep->num, hwep->dir,
			   hwep->type == USB_ENDPOINT_XFER_CONTROL);
}
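
/**
 * _hardware_dequeue: handles a request at hardware level
 * @hwep:  endpoint
 * @hwreq: request
 *
 * This function returns an error code
 */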
static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
	u32 tmptoken;
	struct td_node *node, *tmpnode;
	unsigned remaining_length;
	unsigned actual = hwreq->req.length;
	struct ci_hdrc *ci = hwep->ci;

	if (hwreq->req.status != -EALREADY)
		return -EINVAL;

	hwreq->req.status = 0;

	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		tmptoken = le32_to_cpu(node->ptr->token);
		trace_ci_complete_td(hwep, hwreq, node);
		if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
			int n = hw_ep_bit(hwep->num, hwep->dir);

			if (ci->rev == CI_REVISION_24)
				if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
					reprime_dtd(ci, hwep, node);
			hwreq->req.status = -EALREADY;
			return -EBUSY;
		}

		remaining_length = (tmptoken & TD_TOTAL_BYTES);
		remaining_length >>= __ffs(TD_TOTAL_BYTES);
		actual -= remaining_length;

		hwreq->req.status = tmptoken & TD_STATUS;
		if ((TD_STATUS_HALTED & hwreq->req.status)) {
			hwreq->req.status = -EPIPE;
			break;
		} else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
			hwreq->req.status = -EPROTO;
			break;
		} else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
			hwreq->req.status = -EILSEQ;
			break;
		}

		if (remaining_length) {
			if (hwep->dir == TX) {
				hwreq->req.status = -EPROTO;
				break;
			}
		}

		/*
		 * As the hardware could still address the freed td
		 * which will make the udc unusable, the cleanup of the
		 * td has to be delayed by one.
		 */
		if (hwep->pending_td)
			free_pending_td(hwep);

		hwep->pending_td = node;
		list_del_init(&node->td);
	}

	usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
					&hwreq->req, hwep->dir);

	hwreq->req.actual += actual;

	if (hwreq->req.status)
		return hwreq->req.status;

	return hwreq->req.actual;
}
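
/**
 * _ep_nuke: dequeues all endpoint requests
 * @hwep: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */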
static int _ep_nuke(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct td_node *node, *tmpnode;

	if (hwep == NULL)
		return -EINVAL;

	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	while (!list_empty(&hwep->qh.queue)) {
		/* pop oldest request */
		struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
						     struct ci_hw_req, queue);

		list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
			dma_pool_free(hwep->td_pool, node->ptr, node->dma);
			list_del_init(&node->td);
			node->ptr = NULL;
			kfree(node);
		}

		list_del_init(&hwreq->queue);
		hwreq->req.status = -ESHUTDOWN;

		if (hwreq->req.complete != NULL) {
			spin_unlock(hwep->lock);
			usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
			spin_lock(hwep->lock);
		}
	}

	if (hwep->pending_td)
		free_pending_td(hwep);

	return 0;
}

static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
		return -EOPNOTSUPP;

	spin_lock_irqsave(hwep->lock, flags);

	if (value && hwep->dir == TX && check_transfer &&
	    !list_empty(&hwep->qh.queue) &&
	    !usb_endpoint_xfer_control(hwep->ep.desc)) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return -EAGAIN;
	}

	direction = hwep->dir;
	do {
		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);

		if (!value)
			hwep->wedge = 0;

		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
			hwep->dir = (hwep->dir == TX) ? RX : TX;

	} while (hwep->dir != direction);

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}
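
/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * This function returns an error code
 */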
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
	struct usb_ep *ep;
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	unsigned long flags;

	/* flush all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_fifo_flush(ep);
	}
	usb_ep_fifo_flush(&ci->ep0out->ep);
	usb_ep_fifo_flush(&ci->ep0in->ep);

	/* make sure to disable all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_disable(ep);
	}

	if (ci->status != NULL) {
		usb_ep_free_request(&ci->ep0in->ep, ci->status);
		ci->status = NULL;
	}

	spin_lock_irqsave(&ci->lock, flags);
	ci->gadget.speed = USB_SPEED_UNKNOWN;
	ci->remote_wakeup = 0;
	ci->suspended = 0;
	spin_unlock_irqrestore(&ci->lock, flags);

	return 0;
}
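
/******************************************************************************
 * ISR block
 *****************************************************************************/
/**
 * isr_reset_handler: USB reset interrupt handler
 * @ci: UDC device
 *
 * This function resets the USB engine after a bus reset occurred
 */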
static void isr_reset_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	int retval;

	spin_unlock(&ci->lock);
	if (ci->gadget.speed != USB_SPEED_UNKNOWN)
		usb_gadget_udc_reset(&ci->gadget, ci->driver);

	retval = _gadget_stop_activity(&ci->gadget);
	if (retval)
		goto done;

	retval = hw_usb_reset(ci);
	if (retval)
		goto done;

	ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
	if (ci->status == NULL)
		retval = -ENOMEM;

done:
	spin_lock(&ci->lock);

	if (retval)
		dev_err(ci->dev, "error: %i\n", retval);
}

/**
 * isr_get_status_complete: get_status request complete function
 * @ep:  endpoint
 * @req: request handled
 *
 * Caller must release lock
 */
static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (ep == NULL || req == NULL)
		return;

	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
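
/**
 * _ep_queue: queues (submits) an I/O request to an endpoint
 * @ep:        endpoint
 * @req:       request
 * @gfp_flags: GFP flags (not used)
 *
 * Caller must hold lock
 * This function returns an error code
 */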
static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
		     gfp_t __maybe_unused gfp_flags)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	struct ci_hdrc *ci = hwep->ci;
	int retval = 0;

	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
		if (req->length)
			hwep = (ci->ep0_dir == RX) ?
			       ci->ep0out : ci->ep0in;
		if (!list_empty(&hwep->qh.queue)) {
			_ep_nuke(hwep);
			dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
				 _usb_addr(hwep));
		}
	}

	if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
	    hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
		dev_err(hwep->ci->dev, "request length too big for isochronous\n");
		return -EMSGSIZE;
	}

	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&hwreq->queue)) {
		dev_err(hwep->ci->dev, "request already in queue\n");
		return -EBUSY;
	}

	/* push request */
	hwreq->req.status = -EINPROGRESS;
	hwreq->req.actual = 0;

	retval = _hardware_enqueue(hwep, hwreq);

	if (retval == -EALREADY)
		retval = 0;
	if (!retval)
		list_add_tail(&hwreq->queue, &hwep->qh.queue);

	return retval;
}

/**
 * isr_get_status_response: get_status request response
 * @ci: ci struct
 * @setup: setup request packet
 *
 * This function returns an error code
 */
static int isr_get_status_response(struct ci_hdrc *ci,
				   struct usb_ctrlrequest *setup)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct ci_hw_ep *hwep = ci->ep0in;
	struct usb_request *req = NULL;
	gfp_t gfp_flags = GFP_ATOMIC;
	int dir, num, retval;

	if (hwep == NULL || setup == NULL)
		return -EINVAL;

	spin_unlock(hwep->lock);
	req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
	spin_lock(hwep->lock);
	if (req == NULL)
		return -ENOMEM;

	req->complete = isr_get_status_complete;
	req->length   = 2;
	req->buf      = kzalloc(req->length, gfp_flags);
	if (req->buf == NULL) {
		retval = -ENOMEM;
		goto err_free_req;
	}

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		*(u16 *)req->buf = (ci->remote_wakeup << 1) |
			ci->gadget.is_selfpowered;
	} else if ((setup->bRequestType & USB_RECIP_MASK) ==
		   USB_RECIP_ENDPOINT) {
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		*(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
	}
	/* else do nothing; reserved for future use */

	retval = _ep_queue(&hwep->ep, req, gfp_flags);
	if (retval)
		goto err_free_buf;

	return 0;

err_free_buf:
	kfree(req->buf);
err_free_req:
	spin_unlock(hwep->lock);
	usb_ep_free_request(&hwep->ep, req);
	spin_lock(hwep->lock);
	return retval;
}

/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep:  endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hdrc *ci = req->context;
	unsigned long flags;

	if (req->status < 0)
		return;

	if (ci->setaddr) {
		hw_usb_set_address(ci, ci->address);
		ci->setaddr = false;
		if (ci->address)
			usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
	}

	spin_lock_irqsave(&ci->lock, flags);
	if (ci->test_mode)
		hw_port_test_set(ci, ci->test_mode);
	spin_unlock_irqrestore(&ci->lock, flags);
}

/**
 * isr_setup_status_phase: queues the status phase of a setup transaction
 * @ci: ci struct
 *
 * This function returns an error code
 */
static int isr_setup_status_phase(struct ci_hdrc *ci)
{
	struct ci_hw_ep *hwep;

	/*
	 * Unexpected USB controller behavior, caused by bad signal integrity
	 * or ground reference problems, can lead to isr_setup_status_phase
	 * being called with ci->status equal to NULL.
	 * If this situation occurs, you should review your USB hardware design.
	 */
	if (WARN_ON_ONCE(!ci->status))
		return -EPIPE;

	hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
	ci->status->context = ci;
	ci->status->complete = isr_setup_status_complete;

	return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
}

/**
 * isr_tr_complete_low: transaction complete low level handler
 * @hwep: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
	struct ci_hw_req *hwreq, *hwreqtemp;
	struct ci_hw_ep *hweptemp = hwep;
	int retval = 0;

	list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
				 queue) {
		retval = _hardware_dequeue(hwep, hwreq);
		if (retval < 0)
			break;
		list_del_init(&hwreq->queue);
		if (hwreq->req.complete != NULL) {
			spin_unlock(hwep->lock);
			if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
			    hwreq->req.length)
				hweptemp = hwep->ci->ep0in;
			usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
			spin_lock(hwep->lock);
		}
	}

	if (retval == -EBUSY)
		retval = 0;

	return retval;
}

static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
{
	dev_warn(&ci->gadget.dev,
		 "connect the device to an alternate port if you want HNP\n");
	return isr_setup_status_phase(ci);
}
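
/**
 * isr_setup_packet_handler: setup packet handler
 * @ci: UDC descriptor
 *
 * This function handles setup packets
 */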
static void isr_setup_packet_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
	struct usb_ctrlrequest req;
	int type, num, dir, err = -EINVAL;
	u8 tmode = 0;

	/*
	 * Flush data and handshake transactions of previous
	 * setup packet.
	 */
	_ep_nuke(ci->ep0out);
	_ep_nuke(ci->ep0in);

	/* read_setup_packet */
	do {
		hw_test_and_set_setup_guard(ci);
		memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
	} while (!hw_test_and_clear_setup_guard(ci));

	type = req.bRequestType;

	ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

	switch (req.bRequest) {
	case USB_REQ_CLEAR_FEATURE:
		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
		    le16_to_cpu(req.wValue) == USB_ENDPOINT_HALT) {
			if (req.wLength != 0)
				break;
			num = le16_to_cpu(req.wIndex);
			dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
			num &= USB_ENDPOINT_NUMBER_MASK;
			if (dir == TX)
				num += ci->hw_ep_max / 2;
			if (!ci->ci_hw_ep[num].wedge) {
				spin_unlock(&ci->lock);
				err = usb_ep_clear_halt(
					&ci->ci_hw_ep[num].ep);
				spin_lock(&ci->lock);
				if (err)
					break;
			}
			err = isr_setup_status_phase(ci);
		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
			   le16_to_cpu(req.wValue) ==
			   USB_DEVICE_REMOTE_WAKEUP) {
			if (req.wLength != 0)
				break;
			ci->remote_wakeup = 0;
			err = isr_setup_status_phase(ci);
		} else {
			goto delegate;
		}
		break;
	case USB_REQ_GET_STATUS:
		if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
		     le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
		    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
		    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
			goto delegate;
		if (le16_to_cpu(req.wLength) != 2 ||
		    le16_to_cpu(req.wValue) != 0)
			break;
		err = isr_get_status_response(ci, &req);
		break;
	case USB_REQ_SET_ADDRESS:
		if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
			goto delegate;
		if (le16_to_cpu(req.wLength) != 0 ||
		    le16_to_cpu(req.wIndex) != 0)
			break;
		ci->address = (u8)le16_to_cpu(req.wValue);
		ci->setaddr = true;
		err = isr_setup_status_phase(ci);
		break;
	case USB_REQ_SET_FEATURE:
		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
		    le16_to_cpu(req.wValue) == USB_ENDPOINT_HALT) {
			if (req.wLength != 0)
				break;
			num = le16_to_cpu(req.wIndex);
			dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
			num &= USB_ENDPOINT_NUMBER_MASK;
			if (dir == TX)
				num += ci->hw_ep_max / 2;

			spin_unlock(&ci->lock);
			err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
			spin_lock(&ci->lock);
			if (!err)
				isr_setup_status_phase(ci);
		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
			if (req.wLength != 0)
				break;
			switch (le16_to_cpu(req.wValue)) {
			case USB_DEVICE_REMOTE_WAKEUP:
				ci->remote_wakeup = 1;
				err = isr_setup_status_phase(ci);
				break;
			case USB_DEVICE_TEST_MODE:
				tmode = le16_to_cpu(req.wIndex) >> 8;
				switch (tmode) {
				case USB_TEST_J:
				case USB_TEST_K:
				case USB_TEST_SE0_NAK:
				case USB_TEST_PACKET:
				case USB_TEST_FORCE_ENABLE:
					ci->test_mode = tmode;
					err = isr_setup_status_phase(ci);
					break;
				default:
					break;
				}
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				if (ci_otg_is_fsm_mode(ci)) {
					ci->gadget.b_hnp_enable = 1;
					err = isr_setup_status_phase(ci);
				}
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				if (ci_otg_is_fsm_mode(ci))
					err = otg_a_alt_hnp_support(ci);
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				if (ci_otg_is_fsm_mode(ci)) {
					ci->gadget.a_hnp_support = 1;
					err = isr_setup_status_phase(ci);
				}
				break;
			default:
				goto delegate;
			}
		} else {
			goto delegate;
		}
		break;
	default:
delegate:
		if (req.wLength == 0)	/* no data phase */
			ci->ep0_dir = TX;

		spin_unlock(&ci->lock);
		err = ci->driver->setup(&ci->gadget, &req);
		spin_lock(&ci->lock);
		break;
	}

	if (err < 0) {
		spin_unlock(&ci->lock);
		if (_ep_set_halt(&hwep->ep, 1, false))
			dev_err(ci->dev, "error: _ep_set_halt\n");
		spin_lock(&ci->lock);
	}
}

/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @ci: UDC descriptor
 *
 * This function handles traffic events
 */
static void isr_tr_complete_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
	unsigned i;
	int err;

	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];

		if (hwep->ep.desc == NULL)
			continue;

		if (hw_test_and_clear_complete(ci, i)) {
			err = isr_tr_complete_low(hwep);
			if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)	/* needs status phase */
					err = isr_setup_status_phase(ci);
				if (err < 0) {
					spin_unlock(&ci->lock);
					if (_ep_set_halt(&hwep->ep, 1, false))
						dev_err(ci->dev,
							"error: _ep_set_halt\n");
					spin_lock(&ci->lock);
				}
			}
		}

		/* Only handle setup packet below */
		if (i == 0 &&
		    hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
			isr_setup_packet_handler(ci);
	}
}
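
/******************************************************************************
 * ENDPT block
 *****************************************************************************/
/*
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */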
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int retval = 0;
	unsigned long flags;
	u32 cap = 0;

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);

	/* only internal SW should enable ctrl endpts */

	if (!list_empty(&hwep->qh.queue)) {
		dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
		spin_unlock_irqrestore(hwep->lock, flags);
		return -EBUSY;
	}

	hwep->ep.desc = desc;

	hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
	hwep->num  = usb_endpoint_num(desc);
	hwep->type = usb_endpoint_type(desc);

	hwep->ep.maxpacket = usb_endpoint_maxp(desc);
	hwep->ep.mult = usb_endpoint_maxp_mult(desc);

	if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
		cap |= QH_IOS;

	cap |= QH_ZLT;
	cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
	/*
	 * For ISO-TX, we set mult at QH as the largest value, and use
	 * MultO at TD as real mult value.
	 */
	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
		cap |= 3 << __ffs(QH_MULT);

	hwep->qh.ptr->cap = cpu_to_le32(cap);

	hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);

	if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
		dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
		retval = -EINVAL;
	}

	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (hwep->num)
		retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
				       hwep->type);

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}

/*
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 */
static int ep_disable(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL)
		return -EINVAL;
	else if (hwep->ep.desc == NULL)
		return -EBUSY;

	spin_lock_irqsave(hwep->lock, flags);
	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return 0;
	}

	/* only internal SW should disable ctrl endpts */

	direction = hwep->dir;
	do {
		retval |= _ep_nuke(hwep);
		retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);

		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
			hwep->dir = (hwep->dir == TX) ? RX : TX;

	} while (hwep->dir != direction);

	hwep->ep.desc = NULL;

	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}

/*
 * ep_alloc_request: allocate a request object to use with this endpoint
 *
 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
 */
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct ci_hw_req *hwreq = NULL;

	if (ep == NULL)
		return NULL;

	hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
	if (hwreq != NULL) {
		INIT_LIST_HEAD(&hwreq->queue);
		INIT_LIST_HEAD(&hwreq->tds);
	}

	return (hwreq == NULL) ? NULL : &hwreq->req;
}

/*
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	struct td_node *node, *tmpnode;
	unsigned long flags;

	if (ep == NULL || req == NULL) {
		return;
	} else if (!list_empty(&hwreq->queue)) {
		dev_err(hwep->ci->dev, "freeing queued request\n");
		return;
	}

	spin_lock_irqsave(hwep->lock, flags);

	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
		list_del_init(&node->td);
		node->ptr = NULL;
		kfree(node);
	}

	kfree(hwreq);

	spin_unlock_irqrestore(hwep->lock, flags);
}

/*
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue() at "usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);
	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return 0;
	}
	retval = _ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(hwep->lock, flags);
	return retval;
}

/*
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
	unsigned long flags;
	struct td_node *node, *tmpnode;

	if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
	    hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
	    list_empty(&hwep->qh.queue))
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);
	if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
		hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
		list_del(&node->td);
		kfree(node);
	}

	/* pop request */
	list_del_init(&hwreq->queue);

	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);

	req->status = -ECONNRESET;

	if (hwreq->req.complete != NULL) {
		spin_unlock(hwep->lock);
		usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
		spin_lock(hwep->lock);
	}

	spin_unlock_irqrestore(hwep->lock, flags);
	return 0;
}

/*
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	return _ep_set_halt(ep, value, true);
}

/*
 * ep_set_wedge: sets the halt feature and ignores clear requests
 *
 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
 */
static int ep_set_wedge(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	unsigned long flags;

	if (ep == NULL || hwep->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(hwep->lock, flags);
	hwep->wedge = 1;
	spin_unlock_irqrestore(hwep->lock, flags);

	return usb_ep_set_halt(ep);
}

/*
 * ep_fifo_flush: flushes contents of a fifo
 *
 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
 */
static void ep_fifo_flush(struct usb_ep *ep)
{
	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
	unsigned long flags;

	if (ep == NULL) {
		dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
		return;
	}

	spin_lock_irqsave(hwep->lock, flags);
	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(hwep->lock, flags);
		return;
	}

	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);

	spin_unlock_irqrestore(hwep->lock, flags);
}

/*
 * Endpoint-specific part of the API to the USB controller hardware
 * Check "usb_gadget.h" for details
 */
static const struct usb_ep_ops usb_ep_ops = {
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};
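
/******************************************************************************
 * GADGET block
 *****************************************************************************/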
static int ci_udc_get_frame(struct usb_gadget *_gadget)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ci->lock, flags);
	ret = hw_read(ci, OP_FRINDEX, 0x3fff);
	spin_unlock_irqrestore(&ci->lock, flags);
	return ret >> 3;
}

/*
 * ci_hdrc_gadget_connect: caller makes sure gadget driver is bound
 */
static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);

	if (is_active) {
		pm_runtime_get_sync(ci->dev);
		hw_device_reset(ci);
		spin_lock_irq(&ci->lock);
		if (ci->driver) {
			hw_device_state(ci, ci->ep0out->qh.dma);
			usb_gadget_set_state(_gadget, USB_STATE_POWERED);
			spin_unlock_irq(&ci->lock);
			usb_udc_vbus_handler(_gadget, true);
		} else {
			spin_unlock_irq(&ci->lock);
		}
	} else {
		usb_udc_vbus_handler(_gadget, false);
		if (ci->driver)
			ci->driver->disconnect(&ci->gadget);
		hw_device_state(ci, 0);
		if (ci->platdata->notify_event)
			ci->platdata->notify_event(ci,
				CI_HDRC_CONTROLLER_STOPPED_EVENT);
		_gadget_stop_activity(&ci->gadget);
		pm_runtime_put_sync(ci->dev);
		usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
	}
}

static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ci->lock, flags);
	ci->vbus_active = is_active;
	spin_unlock_irqrestore(&ci->lock, flags);

	if (ci->usb_phy)
		usb_phy_set_charger_state(ci->usb_phy, is_active ?
			USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);

	if (ci->platdata->notify_event)
		ret = ci->platdata->notify_event(ci,
				CI_HDRC_CONTROLLER_VBUS_EVENT);

	if (ci->driver)
		ci_hdrc_gadget_connect(_gadget, is_active);

	return ret;
}

static int ci_udc_wakeup(struct usb_gadget *_gadget)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ci->lock, flags);
	if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
		spin_unlock_irqrestore(&ci->lock, flags);
		return 0;
	}
	if (!ci->remote_wakeup) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
		ret = -EINVAL;
		goto out;
	}
	hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
out:
	spin_unlock_irqrestore(&ci->lock, flags);
	return ret;
}

static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);

	if (ci->usb_phy)
		return usb_phy_set_power(ci->usb_phy, ma);
	return -ENOTSUPP;
}

static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
	struct ci_hw_ep *hwep = ci->ep0in;
	unsigned long flags;

	spin_lock_irqsave(hwep->lock, flags);
	_gadget->is_selfpowered = (is_on != 0);
	spin_unlock_irqrestore(hwep->lock, flags);

	return 0;
}

/* Change Data+ pullup status
 * this func is used by usb_gadget_connect/disconnect
 */
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);

	/*
	 * Data+ pullup controlled by OTG state machine in OTG fsm mode;
	 * and don't touch Data+ in host mode for dual role config.
	 */
	if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
		return 0;

	pm_runtime_get_sync(ci->dev);
	if (is_on)
		hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
	else
		hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
	pm_runtime_put_sync(ci->dev);

	return 0;
}

static int ci_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver);
static int ci_udc_stop(struct usb_gadget *gadget);

/* Match ISOC IN from the highest endpoint */
static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
			struct usb_endpoint_descriptor *desc,
			struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	struct usb_ep *ep;

	if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) {
		list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
			if (ep->caps.dir_in && !ep->claimed)
				return ep;
		}
	}

	return NULL;
}

/*
 * Device operations part of the API to the USB controller hardware,
 * which don't involve endpoints (or i/o)
 * Check "usb_gadget.h" for details
 */
static const struct usb_gadget_ops usb_gadget_ops = {
	.get_frame	 = ci_udc_get_frame,
	.vbus_session	 = ci_udc_vbus_session,
	.wakeup		 = ci_udc_wakeup,
	.set_selfpowered = ci_udc_selfpowered,
	.pullup		 = ci_udc_pullup,
	.vbus_draw	 = ci_udc_vbus_draw,
	.udc_start	 = ci_udc_start,
	.udc_stop	 = ci_udc_stop,
	.match_ep	 = ci_udc_match_ep,
};

static int init_eps(struct ci_hdrc *ci)
{
	int retval = 0, i, j;

	for (i = 0; i < ci->hw_ep_max/2; i++)
		for (j = RX; j <= TX; j++) {
			int k = i + j * ci->hw_ep_max/2;
			struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];

			scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
				  (j == TX) ? "in" : "out");

			hwep->ci      = ci;
			hwep->lock    = &ci->lock;
			hwep->td_pool = ci->td_pool;

			hwep->ep.name = hwep->name;
			hwep->ep.ops  = &usb_ep_ops;

			if (i == 0) {
				hwep->ep.caps.type_control = true;
			} else {
				hwep->ep.caps.type_iso = true;
				hwep->ep.caps.type_bulk = true;
				hwep->ep.caps.type_int = true;
			}

			if (j == TX)
				hwep->ep.caps.dir_in = true;
			else
				hwep->ep.caps.dir_out = true;

			/*
			 * for ep0: maxP defined in descriptor
			 * for other eps: maxP is set by epautoconfig() called
			 * by gadget layer
			 */
			usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);

			INIT_LIST_HEAD(&hwep->qh.queue);
			hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
						       &hwep->qh.dma);
			if (hwep->qh.ptr == NULL)
				retval = -ENOMEM;

			/*
			 * set up shorthands for ep0 out and in endpoints,
			 * don't add to gadget's ep_list
			 */
			if (i == 0) {
				if (j == RX)
					ci->ep0out = hwep;
				else
					ci->ep0in = hwep;

				usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
				continue;
			}

			list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
		}

	return retval;
}

static void destroy_eps(struct ci_hdrc *ci)
{
	int i;

	for (i = 0; i < ci->hw_ep_max; i++) {
		struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];

		if (hwep->pending_td)
			free_pending_td(hwep);
		dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
	}
}

/**
 * ci_udc_start: register a gadget driver
 * @gadget: our gadget
 * @driver: the driver being registered
 *
 * Interrupts are enabled here.
 */
static int ci_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	int retval;

	if (driver->disconnect == NULL)
		return -EINVAL;

	ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
	retval = usb_ep_enable(&ci->ep0out->ep);
	if (retval)
		return retval;

	ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
	retval = usb_ep_enable(&ci->ep0in->ep);
	if (retval)
		return retval;

	ci->driver = driver;

	/* Start otg fsm for B-device */
	if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
		ci_hdrc_otg_fsm_start(ci);
		return retval;
	}

	if (ci->vbus_active)
		ci_hdrc_gadget_connect(gadget, 1);
	else
		usb_udc_vbus_handler(&ci->gadget, false);

	return retval;
}

static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
{
	if (!ci_otg_is_fsm_mode(ci))
		return;

	mutex_lock(&ci->fsm.lock);
	if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
		ci->fsm.a_bidl_adis_tmout = 1;
		ci_hdrc_otg_fsm_start(ci);
	} else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
		ci->fsm.protocol = PROTO_UNDEF;
		ci->fsm.otg->state = OTG_STATE_UNDEFINED;
	}
	mutex_unlock(&ci->fsm.lock);
}

/*
 * ci_udc_stop: unregister a gadget driver
 */
static int ci_udc_stop(struct usb_gadget *gadget)
{
	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&ci->lock, flags);
	ci->driver = NULL;

	if (ci->vbus_active) {
		hw_device_state(ci, 0);
		spin_unlock_irqrestore(&ci->lock, flags);
		if (ci->platdata->notify_event)
			ci->platdata->notify_event(ci,
				CI_HDRC_CONTROLLER_STOPPED_EVENT);
		_gadget_stop_activity(&ci->gadget);
		spin_lock_irqsave(&ci->lock, flags);
		pm_runtime_put(ci->dev);
	}

	spin_unlock_irqrestore(&ci->lock, flags);

	ci_udc_stop_for_otg_fsm(ci);
	return 0;
}
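
/******************************************************************************
 * BUS block
 *****************************************************************************/
/**
 * udc_irq: ci interrupt handler
 * @ci: UDC device
 *
 * This function returns IRQ_HANDLED if the IRQ has been handled
 * It locks access to registers
 */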
static irqreturn_t udc_irq(struct ci_hdrc *ci)
{
	irqreturn_t retval;
	u32 intr;

	if (ci == NULL)
		return IRQ_HANDLED;

	spin_lock(&ci->lock);

	if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
		if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
				USBMODE_CM_DC) {
			spin_unlock(&ci->lock);
			return IRQ_NONE;
		}
	}
	intr = hw_test_and_clear_intr_active(ci);

	if (intr) {
		/* order defines priority - do NOT change it */
		if (USBi_URI & intr)
			isr_reset_handler(ci);

		if (USBi_PCI & intr) {
			ci->gadget.speed = hw_port_is_high_speed(ci) ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
			if (ci->suspended) {
				if (ci->driver->resume) {
					spin_unlock(&ci->lock);
					ci->driver->resume(&ci->gadget);
					spin_lock(&ci->lock);
				}
				ci->suspended = 0;
				usb_gadget_set_state(&ci->gadget,
						     ci->resume_state);
			}
		}

		if (USBi_UI & intr)
			isr_tr_complete_handler(ci);

		if ((USBi_SLI & intr) && !(ci->suspended)) {
			ci->suspended = 1;
			ci->resume_state = ci->gadget.state;
			if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
			    ci->driver->suspend) {
				spin_unlock(&ci->lock);
				ci->driver->suspend(&ci->gadget);
				spin_lock(&ci->lock);
			}
			usb_gadget_set_state(&ci->gadget,
					     USB_STATE_SUSPENDED);
		}
		retval = IRQ_HANDLED;
	} else {
		retval = IRQ_NONE;
	}
	spin_unlock(&ci->lock);

	return retval;
}

/**
 * udc_start: initialize gadget role
 * @ci: chipidea controller
 */
static int udc_start(struct ci_hdrc *ci)
{
	struct device *dev = ci->dev;
	struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
	int retval = 0;

	ci->gadget.ops          = &usb_gadget_ops;
	ci->gadget.speed        = USB_SPEED_UNKNOWN;
	ci->gadget.max_speed    = USB_SPEED_HIGH;
	ci->gadget.name         = ci->platdata->name;
	ci->gadget.otg_caps     = otg_caps;
	ci->gadget.sg_supported = 1;
	ci->gadget.irq          = ci->irq;

	if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
		ci->gadget.quirk_avoids_skb_reserve = 1;

	if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
			   otg_caps->adp_support))
		ci->gadget.is_otg = 1;

	INIT_LIST_HEAD(&ci->gadget.ep_list);

	/* alloc resources */
	ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent,
				      sizeof(struct ci_hw_qh),
				      64, CI_HDRC_PAGE_SIZE);
	if (ci->qh_pool == NULL)
		return -ENOMEM;

	ci->td_pool = dma_pool_create("ci_hw_td", dev->parent,
				      sizeof(struct ci_hw_td),
				      64, CI_HDRC_PAGE_SIZE);
	if (ci->td_pool == NULL) {
		retval = -ENOMEM;
		goto free_qh_pool;
	}

	retval = init_eps(ci);
	if (retval)
		goto free_pools;

	ci->gadget.ep0 = &ci->ep0in->ep;

	retval = usb_add_gadget_udc(dev, &ci->gadget);
	if (retval)
		goto destroy_eps;

	return retval;

destroy_eps:
	destroy_eps(ci);
free_pools:
	dma_pool_destroy(ci->td_pool);
free_qh_pool:
	dma_pool_destroy(ci->qh_pool);
	return retval;
}

/*
 * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
 *
 * No interrupts active, the IRQ has been released
 */
void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
{
	if (!ci->roles[CI_ROLE_GADGET])
		return;

	usb_del_gadget_udc(&ci->gadget);

	destroy_eps(ci);

	dma_pool_destroy(ci->td_pool);
	dma_pool_destroy(ci->qh_pool);
}

static int udc_id_switch_for_device(struct ci_hdrc *ci)
{
	if (ci->platdata->pins_device)
		pinctrl_select_state(ci->platdata->pctl,
				     ci->platdata->pins_device);

	if (ci->is_otg)
		/* Clear and enable BSV irq */
		hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
			       OTGSC_BSVIS | OTGSC_BSVIE);

	return 0;
}

static void udc_id_switch_for_host(struct ci_hdrc *ci)
{
	/*
	 * host doesn't care B_SESSION_VALID event
	 * so clear and disable BSV irq
	 */
	if (ci->is_otg)
		hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);

	ci->vbus_active = 0;

	if (ci->platdata->pins_device && ci->platdata->pins_default)
		pinctrl_select_state(ci->platdata->pctl,
				     ci->platdata->pins_default);
}
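
/**
 * ci_hdrc_gadget_init - initialize device related bits
 * @ci: the controller
 *
 * This function initializes the gadget, if the device is "device capable".
 */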
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{
	struct ci_role_driver *rdrv;
	int ret;

	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
		return -ENXIO;

	rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start = udc_id_switch_for_device;
	rdrv->stop  = udc_id_switch_for_host;
	rdrv->irq   = udc_irq;
	rdrv->name  = "gadget";

	ret = udc_start(ci);
	if (!ret)
		ci->roles[CI_ROLE_GADGET] = rdrv;

	return ret;
}