// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg && ring->first_seg->trbs) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}

static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

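	/* Serial string: */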
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	string_length <<= 8;

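	/* Product string: */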
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

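	/* Manufacturer string: */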
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

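	/* String0 descriptor: language ID 0x0409 (US English) */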
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
	string_length += 4;

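	/* Lengths are packed one per byte: serial<<24 | product<<16 | manufacturer<<8 | string0 */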
	return string_length;
}

static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

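	/* Populate info Context: */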
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

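	/* Populate bulk out endpoint context: */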
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

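	/* Populate bulk in endpoint context: */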
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

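	/* Set DbC context and info registers: */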
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() byte-swaps as needed; don't pre-convert with cpu_to_le32() */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}

static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

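	/* Give back the transfer request: */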
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb *trb = req->trb;

	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request *req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
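	/* Toggle the link TRB's cycle bit and wrap to the start of the (single) segment */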
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

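	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */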
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

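	/* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes */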
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

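	/* Only event ring does not use link TRB */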
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

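	/* Allocate various rings for events and transfers: */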
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

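	/* Allocate and populate ERST: */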
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

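	/* Allocate context data structure: */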
	dbc->ctx = dbc_alloc_ctx(dev, flags);
	if (!dbc->ctx)
		goto ctx_fail;

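	/* Allocate the string table: */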
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

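	/* Setup ERST register: */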
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

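	/* Setup strings and contexts: */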
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -1;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
	case DS_STALLED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(dbc);
		pm_runtime_put_sync(dbc->dev);
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

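	/* Port reset change bit will be cleared in other place: */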
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ring = dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

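	/* Match the pending request: */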
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
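	/* If on the last TRB of the segment go back to the beginning */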
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	struct dbc_ep *dep;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

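	/* DbC state machine: */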
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:

		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
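		/* Handle cable unplug event: */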
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

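		/* Handle debug port reset event: */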
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

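		/* Handle endpoint stall event: */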
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

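		/* Clear DbC run change bit: */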
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

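	/* Handle the events in the event ring: */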
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
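		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */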
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

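	/* Update event ring dequeue pointer: */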
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char *p;
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;
	struct xhci_dbc *dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(dbc);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
{
	struct xhci_dbc *dbc;
	int ret;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return NULL;

	dbc->regs = base;
	dbc->dev = dev;
	dbc->driver = driver;

	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
		goto err;

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto err;

	return dbc;
err:
	kfree(dbc);
	return NULL;
}

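/* undo what xhci_alloc_dbc() did */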
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

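	/* stop hw first */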
	xhci_dbc_stop(dbc);

	device_remove_file(dbc->dev, &dev_attr_dbc);

	kfree(dbc);
}

int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
	struct device *dev;
	void __iomem *base;
	int ret;
	int dbc_cap_offs;

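	/* create all parameters needed resembling a dbc device */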
	dev = xhci_to_hcd(xhci)->self.controller;
	base = &xhci->cap_regs->hc_capbase;

	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

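	/* already allocated and in use */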
	if (xhci->dbc)
		return -EBUSY;

	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);

	return ret;
}

void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */

int xhci_dbc_init(void)
{
	return dbc_tty_init();
}

void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}