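/*
 * Early xHCI Debug Capability (DbC) driver: registers an early-boot
 * console that forwards printk output to a remote debug host through
 * the xHCI Debug Capability of a USB 3 host controller.
 */
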
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/usb/xhci-dbgp.h>

#include "../host/xhci.h"
#include "xhci-dbc.h"

static struct xdbc_state xdbc;
static bool early_console_keep;

#ifdef XDBC_TRACE
#define xdbc_trace trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif

static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64 = PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64 |= (u64)val << 32;
		sz64 |= (u64)sz << 32;
		mask64 |= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

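	/* Check if the mem mapping is enabled: */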
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}

static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
	void *virt;

	virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!virt)
		return NULL;

	if (dma_addr)
		*dma_addr = (dma_addr_t)__pa(virt);

	return virt;
}

static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
{
	u32 bus, dev, func, class;

	for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
		for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
			for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {

				class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
				if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
					continue;

				if (xdbc_num-- != 0)
					continue;

				*b = bus;
				*d = dev;
				*f = func;

				return 0;
			}
		}
	}

	return -1;
}

static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
	u32 result;

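	/* Poll by hand; readl_poll_timeout_atomic() is not usable this early in boot. */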
	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay);
		wait -= delay;
	} while (wait > 0);

	return -ETIMEDOUT;
}

static void __init xdbc_bios_handoff(void)
{
	int offset, timeout;
	u32 val;

	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
	val = readl(xdbc.xhci_base + offset);

	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
		timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);

		if (timeout) {
			pr_notice("failed to hand over xHCI control from BIOS\n");
			writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
		}
	}

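	/* Disable BIOS SMIs and clear all SMI events: */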
	val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
	val &= XHCI_LEGACY_DISABLE_SMI;
	val |= XHCI_LEGACY_SMI_EVENTS;
	writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
}

static int __init
xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
{
	seg->trbs = xdbc_get_page(&seg->dma);
	if (!seg->trbs)
		return -ENOMEM;

	ring->segment = seg;

	return 0;
}

static void __init xdbc_free_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;

	if (!seg)
		return;

	memblock_phys_free(seg->dma, PAGE_SIZE);
	ring->segment = NULL;
}

static void xdbc_reset_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;
	struct xdbc_trb *link_trb;

	memset(seg->trbs, 0, PAGE_SIZE);

	ring->enqueue = seg->trbs;
	ring->dequeue = seg->trbs;
	ring->cycle_state = 1;

	if (ring != &xdbc.evt_ring) {
		link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
		link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
		link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
		link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
	}
}

static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		s[i] = cpu_to_le16(c[i]);
}

static void xdbc_mem_init(void)
{
	struct xdbc_ep_context *ep_in, *ep_out;
	struct usb_string_descriptor *s_desc;
	struct xdbc_erst_entry *entry;
	struct xdbc_strings *strings;
	struct xdbc_context *ctx;
	unsigned int max_burst;
	u32 string_length;
	int index = 0;
	u32 dev_info;

	xdbc_reset_ring(&xdbc.evt_ring);
	xdbc_reset_ring(&xdbc.in_ring);
	xdbc_reset_ring(&xdbc.out_ring);
	memset(xdbc.table_base, 0, PAGE_SIZE);
	memset(xdbc.out_buf, 0, PAGE_SIZE);

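	/* Initialize event ring segment table: */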
	xdbc.erst_size = 16;
	xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_ERST_ENTRY_NUM;
	entry = (struct xdbc_erst_entry *)xdbc.erst_base;

	entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
	entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
	entry->__reserved_0 = 0;

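	/* Event ring registers: */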
	writel(1, &xdbc.xdbc_reg->ersts);
	xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
	xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);

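	/* Debug capability contexts: */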
	xdbc.dbcc_size = 64 * 3;
	xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_DBCC_ENTRY_NUM;

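	/* Populate the strings: */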
	xdbc.string_size = sizeof(struct xdbc_strings);
	xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
	strings = (struct xdbc_strings *)xdbc.string_base;

	index += XDBC_STRING_ENTRY_NUM;

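	/* Serial string: */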
	s_desc = (struct usb_string_descriptor *)strings->serial;
	s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
	string_length = s_desc->bLength;
	string_length <<= 8;

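	/* Product string: */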
	s_desc = (struct usb_string_descriptor *)strings->product;
	s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
	string_length += s_desc->bLength;
	string_length <<= 8;

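	/* Manufacturer string: */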
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
	string_length += s_desc->bLength;
	string_length <<= 8;

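	/* String0 descriptor (little-endian LANGID 0x0409, US English): */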
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;

	string_length += 4;

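	/* Populate the info context: */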
	ctx = (struct xdbc_context *)xdbc.dbcc_base;

	ctx->info.string0 = cpu_to_le64(xdbc.string_dma);
	ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
	ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
	ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
	ctx->info.length = cpu_to_le32(string_length);

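	/* Populate bulk out endpoint context: */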
	max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
	ep_out = (struct xdbc_ep_context *)&ctx->out;

	ep_out->ep_info1 = 0;
	ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);

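	/* Populate bulk in endpoint context: */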
	ep_in = (struct xdbc_ep_context *)&ctx->in;

	ep_in->ep_info1 = 0;
	ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);

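	/* Set DbC context and info registers: */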
	xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);

	dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
	writel(dev_info, &xdbc.xdbc_reg->devinfo1);

	dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
	writel(dev_info, &xdbc.xdbc_reg->devinfo2);

	xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
	xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
}

static void xdbc_do_reset_debug_port(u32 id, u32 count)
{
	void __iomem *ops_reg;
	void __iomem *portsc;
	u32 val, cap_length;
	int i;

	cap_length = readl(xdbc.xhci_base) & 0xff;
	ops_reg = xdbc.xhci_base + cap_length;

	id--;
	for (i = id; i < (id + count); i++) {
		portsc = ops_reg + 0x400 + i * 0x10;
		val = readl(portsc);
		if (!(val & PORT_CONNECT))
			writel(val | PORT_RESET, portsc);
	}
}

static void xdbc_reset_debug_port(void)
{
	u32 val, port_offset, port_count;
	int offset = 0;

	do {
		offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(xdbc.xhci_base + offset);
		if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
			continue;

		val = readl(xdbc.xhci_base + offset + 8);
		port_offset = XHCI_EXT_PORT_OFF(val);
		port_count = XHCI_EXT_PORT_COUNT(val);

		xdbc_do_reset_debug_port(port_offset, port_count);
	} while (1);
}

static void
xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xdbc_trb *trb, *link_trb;

	trb = ring->enqueue;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	++(ring->enqueue);
	if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
		link_trb = ring->enqueue;
		if (ring->cycle_state)
			link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
		else
			link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

		ring->enqueue = ring->segment->trbs;
		ring->cycle_state ^= 1;
	}
}

static void xdbc_ring_doorbell(int target)
{
	writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
}

static int xdbc_start(void)
{
	u32 ctrl, status;
	int ret;

	ctrl = readl(&xdbc.xdbc_reg->control);
	writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
	if (ret) {
		xdbc_trace("failed to initialize hardware\n");
		return ret;
	}

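	/* Reset the debug port to avoid bus hang: */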
	if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
		xdbc_reset_debug_port();

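	/* Wait for port connection: */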
	ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for connection timed out\n");
		return ret;
	}

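	/* Wait for debug device to be configured: */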
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for device configuration timed out\n");
		return ret;
	}

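	/* Check and store the debug port number: */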
	status = readl(&xdbc.xdbc_reg->status);
	if (!DCST_DEBUG_PORT(status)) {
		xdbc_trace("invalid root hub port number\n");
		return -ENODEV;
	}

	xdbc.port_number = DCST_DEBUG_PORT(status);

	xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
		   readl(&xdbc.xdbc_reg->control), xdbc.port_number);

	return 0;
}

static int xdbc_bulk_transfer(void *data, int size, bool read)
{
	struct xdbc_ring *ring;
	struct xdbc_trb *trb;
	u32 length, control;
	u32 cycle;
	u64 addr;

	if (size > XDBC_MAX_PACKET) {
		xdbc_trace("bad parameter, size %d\n", size);
		return -EINVAL;
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
	    !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
	    (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
	    (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {

		xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
		return -EIO;
	}

	ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(size);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	if (read) {
		memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
		addr = xdbc.in_dma;
		xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
	} else {
		memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
		memcpy(xdbc.out_buf, data, size);
		addr = xdbc.out_dma;
		xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
	}

	xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);

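	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit of this trb:
	 */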
	wmb();
	if (cycle)
		trb->field[3] |= cpu_to_le32(cycle);
	else
		trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);

	return size;
}

static int xdbc_handle_external_reset(void)
{
	int ret = 0;

	xdbc.flags = 0;
	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
	if (ret)
		goto reset_out;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0)
		goto reset_out;

	xdbc_trace("dbc recovered\n");

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;

reset_out:
	xdbc_trace("failed to recover from external reset\n");
	return ret;
}

static int __init xdbc_early_setup(void)
{
	int ret;

	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
	if (ret)
		return ret;

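	/* Allocate the table page: */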
	xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
	if (!xdbc.table_base)
		return -ENOMEM;

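	/* Get and store the transfer buffer: */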
	xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
	if (!xdbc.out_buf)
		return -ENOMEM;

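	/* Allocate the event ring: */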
	ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
	if (ret < 0)
		return ret;

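	/* Allocate IN/OUT endpoint transfer rings: */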
	ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
	if (ret < 0)
		return ret;

	ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
	if (ret < 0)
		return ret;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0) {
		writel(0, &xdbc.xdbc_reg->control);
		return ret;
	}

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;
}

int __init early_xdbc_parse_parameter(char *s, int keep_early)
{
	unsigned long dbgp_num = 0;
	u32 bus, dev, func, offset;
	char *e;
	int ret;

	if (!early_pci_allowed())
		return -EPERM;

	early_console_keep = keep_early;

	if (xdbc.xdbc_reg)
		return 0;

	if (*s) {
		dbgp_num = simple_strtoul(s, &e, 10);
		if (s == e)
			dbgp_num = 0;
	}

	pr_notice("dbgp_num: %lu\n", dbgp_num);

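	/* Locate the host controller: */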
	ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
	if (ret) {
		pr_notice("failed to locate xhci host\n");
		return -ENODEV;
	}

	xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
	xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
	xdbc.bus = bus;
	xdbc.dev = dev;
	xdbc.func = func;

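	/* Map the IO memory: */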
	xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
	if (!xdbc.xhci_base)
		return -EINVAL;

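	/* Locate DbC registers: */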
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!offset) {
		pr_notice("xhci host doesn't support debug capability\n");
		early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
		xdbc.xhci_base = NULL;
		xdbc.xhci_length = 0;

		return -ENODEV;
	}
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);

	return 0;
}

int __init early_xdbc_setup_hardware(void)
{
	int ret;

	if (!xdbc.xdbc_reg)
		return -ENODEV;

	xdbc_bios_handoff();

	raw_spin_lock_init(&xdbc.lock);

	ret = xdbc_early_setup();
	if (ret) {
		pr_notice("failed to setup the connection to host\n");

		xdbc_free_ring(&xdbc.evt_ring);
		xdbc_free_ring(&xdbc.out_ring);
		xdbc_free_ring(&xdbc.in_ring);

		if (xdbc.table_dma)
			memblock_phys_free(xdbc.table_dma, PAGE_SIZE);

		if (xdbc.out_dma)
			memblock_phys_free(xdbc.out_dma, PAGE_SIZE);

		xdbc.table_base = NULL;
		xdbc.out_buf = NULL;
	}

	return ret;
}

static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
{
	u32 port_reg;

	port_reg = readl(&xdbc.xdbc_reg->portsc);
	if (port_reg & PORTSC_CONN_CHANGE) {
		xdbc_trace("connect status change event\n");

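		/* Check whether cable unplugged: */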
		if (!(port_reg & PORTSC_CONN_STATUS)) {
			xdbc.flags = 0;
			xdbc_trace("cable unplugged\n");
		}
	}

	if (port_reg & PORTSC_RESET_CHANGE)
		xdbc_trace("port reset change event\n");

	if (port_reg & PORTSC_LINK_CHANGE)
		xdbc_trace("port link status change event\n");

	if (port_reg & PORTSC_CONFIG_CHANGE)
		xdbc_trace("config error change\n");

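	/* Clear port status change bits: */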
	writel(port_reg, &xdbc.xdbc_reg->portsc);
}

static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
	u32 comp_code;
	int ep_id;

	comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));

	switch (comp_code) {
	case COMP_SUCCESS:
	case COMP_SHORT_PACKET:
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
	default:
		if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
			xdbc.flags |= XDBC_FLAGS_OUT_STALL;
		if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
			xdbc.flags |= XDBC_FLAGS_IN_STALL;

		xdbc_trace("endpoint %d stalled\n", ep_id);
		break;
	}

	if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
		xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	} else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
	} else {
		xdbc_trace("invalid endpoint id %d\n", ep_id);
	}
}

static void xdbc_handle_events(void)
{
	struct xdbc_trb *evt_trb;
	bool update_erdp = false;
	u32 reg;
	u8 cmd;

	cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
		write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return;

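	/* Handle external reset events: */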
	reg = readl(&xdbc.xdbc_reg->control);
	if (!(reg & CTRL_DBC_ENABLE)) {
		if (xdbc_handle_external_reset()) {
			xdbc_trace("failed to recover connection\n");
			return;
		}
	}

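	/* Handle configure-exit event: */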
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_DBC_RUN_CHANGE) {
		writel(reg, &xdbc.xdbc_reg->control);
		if (reg & CTRL_DBC_RUN)
			xdbc.flags |= XDBC_FLAGS_CONFIGURED;
		else
			xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
	}

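	/* Handle endpoint stall event: */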
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_HALT_IN_TR) {
		xdbc.flags |= XDBC_FLAGS_IN_STALL;
	} else {
		xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
		if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
			xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	}

	if (reg & CTRL_HALT_OUT_TR)
		xdbc.flags |= XDBC_FLAGS_OUT_STALL;
	else
		xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;

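	/* Handle the events in the event ring: */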
	evt_trb = xdbc.evt_ring.dequeue;
	while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
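		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */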
		rmb();

		switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
		case TRB_TYPE(TRB_PORT_STATUS):
			xdbc_handle_port_status(evt_trb);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			xdbc_handle_tx_event(evt_trb);
			break;
		default:
			break;
		}

		++(xdbc.evt_ring.dequeue);
		if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
			xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
			xdbc.evt_ring.cycle_state ^= 1;
		}

		evt_trb = xdbc.evt_ring.dequeue;
		update_erdp = true;
	}

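	/* Update event ring dequeue pointer: */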
	if (update_erdp)
		xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
}

static int xdbc_bulk_write(const char *bytes, int size)
{
	int ret, timeout = 0;
	unsigned long flags;

retry:
	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
			return -EAGAIN;
	} else {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
	}

	xdbc_handle_events();

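	/* Check completion of the previous request: */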
	if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		udelay(100);
		timeout += 100;
		goto retry;
	}

	if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		xdbc_trace("previous transfer not completed yet\n");

		return -ETIMEDOUT;
	}

	ret = xdbc_bulk_transfer((void *)bytes, size, false);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	return ret;
}

static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
	static char buf[XDBC_MAX_PACKET];
	int chunk, ret;
	int use_cr = 0;

	if (!xdbc.xdbc_reg)
		return;
	memset(buf, 0, XDBC_MAX_PACKET);
	while (n > 0) {
		for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {
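			/* Insert a carriage return before each line feed: */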
			if (!use_cr && *str == '\n') {
				use_cr = 1;
				buf[chunk] = '\r';
				str--;
				n++;
				continue;
			}

			if (use_cr)
				use_cr = 0;
			buf[chunk] = *str;
		}

		if (chunk > 0) {
			ret = xdbc_bulk_write(buf, chunk);
			if (ret < 0)
				xdbc_trace("missed message {%s}\n", buf);
		}
	}
}

static struct console early_xdbc_console = {
	.name		= "earlyxdbc",
	.write		= early_xdbc_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

void __init early_xdbc_register_console(void)
{
	if (early_console)
		return;

	early_console = &early_xdbc_console;
	if (early_console_keep)
		early_console->flags &= ~CON_BOOT;
	else
		early_console->flags |= CON_BOOT;
	register_console(early_console);
}

static void xdbc_unregister_console(void)
{
	if (early_xdbc_console.flags & CON_ENABLED)
		unregister_console(&early_xdbc_console);
}

static int xdbc_scrub_function(void *ptr)
{
	unsigned long flags;

	while (true) {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
		xdbc_handle_events();

		if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
			raw_spin_unlock_irqrestore(&xdbc.lock, flags);
			break;
		}

		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		schedule_timeout_interruptible(1);
	}

	xdbc_unregister_console();
	writel(0, &xdbc.xdbc_reg->control);
	xdbc_trace("dbc scrub function exits\n");

	return 0;
}

static int __init xdbc_init(void)
{
	unsigned long flags;
	void __iomem *base;
	int ret = 0;
	u32 offset;

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return 0;

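	/*
	 * It's time to shut down the DbC, so that the debug
	 * port can be reused by the host controller:
	 */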
	if (early_xdbc_console.index == -1 ||
	    (early_xdbc_console.flags & CON_BOOT)) {
		xdbc_trace("hardware not used anymore\n");
		goto free_and_quit;
	}

	base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
	if (!base) {
		xdbc_trace("failed to remap the io address\n");
		ret = -ENOMEM;
		goto free_and_quit;
	}

	raw_spin_lock_irqsave(&xdbc.lock, flags);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
	xdbc.xhci_base = base;
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");

	return 0;

free_and_quit:
	xdbc_free_ring(&xdbc.evt_ring);
	xdbc_free_ring(&xdbc.out_ring);
	xdbc_free_ring(&xdbc.in_ring);
	memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
	memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
	writel(0, &xdbc.xdbc_reg->control);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);

	return ret;
}
subsys_initcall(xdbc_init);