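// SPDX-License-Identifier: GPL-2.0+
/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Author: Kevin Cernekee <cernekee@gmail.com>
 */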
0009 #include <linux/bitops.h>
0010 #include <linux/bug.h>
0011 #include <linux/clk.h>
0012 #include <linux/compiler.h>
0013 #include <linux/debugfs.h>
0014 #include <linux/delay.h>
0015 #include <linux/device.h>
0016 #include <linux/dma-mapping.h>
0017 #include <linux/errno.h>
0018 #include <linux/interrupt.h>
0019 #include <linux/ioport.h>
0020 #include <linux/kernel.h>
0021 #include <linux/list.h>
0022 #include <linux/module.h>
0023 #include <linux/moduleparam.h>
0024 #include <linux/platform_device.h>
0025 #include <linux/sched.h>
0026 #include <linux/seq_file.h>
0027 #include <linux/slab.h>
0028 #include <linux/timer.h>
0029 #include <linux/usb.h>
0030 #include <linux/usb/ch9.h>
0031 #include <linux/usb/gadget.h>
0032 #include <linux/workqueue.h>
0033
0034 #include <bcm63xx_cpu.h>
0035 #include <bcm63xx_iudma.h>
0036 #include <bcm63xx_dev_usb_usbd.h>
0037 #include <bcm63xx_io.h>
0038 #include <bcm63xx_regs.h>
0039
0040 #define DRV_MODULE_NAME "bcm63xx_udc"
0041
0042 static const char bcm63xx_ep0name[] = "ep0";
0043
0044 static const struct {
0045 const char *name;
0046 const struct usb_ep_caps caps;
0047 } bcm63xx_ep_info[] = {
0048 #define EP_INFO(_name, _caps) \
0049 { \
0050 .name = _name, \
0051 .caps = _caps, \
0052 }
0053
0054 EP_INFO(bcm63xx_ep0name,
0055 USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
0056 EP_INFO("ep1in-bulk",
0057 USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
0058 EP_INFO("ep2out-bulk",
0059 USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
0060 EP_INFO("ep3in-int",
0061 USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
0062 EP_INFO("ep4out-int",
0063 USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
0064
0065 #undef EP_INFO
0066 };
0067
0068 static bool use_fullspeed;
0069 module_param(use_fullspeed, bool, S_IRUGO);
0070 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
0071
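/*
 * RX IRQ coalescing options:
 *
 * false (default) - one descriptor (and one BUFDONE IRQ) per RX packet.
 * Slower, but gives the driver a chance to react after every packet.
 *
 * true - RX descriptors cover up to IUDMA_MAX_FRAGMENT (2048) bytes each,
 * so large transfers complete with far fewer IRQs, at the cost of less
 * fine-grained error recovery.
 *
 * TX transfers always use IUDMA_MAX_FRAGMENT-sized descriptors.
 */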
0091 static bool irq_coalesce;
0092 module_param(irq_coalesce, bool, S_IRUGO);
0093 MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
0094
0095 #define BCM63XX_NUM_EP 5
0096 #define BCM63XX_NUM_IUDMA 6
0097 #define BCM63XX_NUM_FIFO_PAIRS 3
0098
0099 #define IUDMA_RESET_TIMEOUT_US 10000
0100
0101 #define IUDMA_EP0_RXCHAN 0
0102 #define IUDMA_EP0_TXCHAN 1
0103
0104 #define IUDMA_MAX_FRAGMENT 2048
0105 #define BCM63XX_MAX_CTRL_PKT 64
0106
0107 #define BCMEP_CTRL 0x00
0108 #define BCMEP_ISOC 0x01
0109 #define BCMEP_BULK 0x02
0110 #define BCMEP_INTR 0x03
0111
0112 #define BCMEP_OUT 0x00
0113 #define BCMEP_IN 0x01
0114
0115 #define BCM63XX_SPD_FULL 1
0116 #define BCM63XX_SPD_HIGH 0
0117
0118 #define IUDMA_DMAC_OFFSET 0x200
0119 #define IUDMA_DMAS_OFFSET 0x400
0120
0121 enum bcm63xx_ep0_state {
0122 EP0_REQUEUE,
0123 EP0_IDLE,
0124 EP0_IN_DATA_PHASE_SETUP,
0125 EP0_IN_DATA_PHASE_COMPLETE,
0126 EP0_OUT_DATA_PHASE_SETUP,
0127 EP0_OUT_DATA_PHASE_COMPLETE,
0128 EP0_OUT_STATUS_PHASE,
0129 EP0_IN_FAKE_STATUS_PHASE,
0130 EP0_SHUTDOWN,
0131 };
0132
0133 static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
0134 "REQUEUE",
0135 "IDLE",
0136 "IN_DATA_PHASE_SETUP",
0137 "IN_DATA_PHASE_COMPLETE",
0138 "OUT_DATA_PHASE_SETUP",
0139 "OUT_DATA_PHASE_COMPLETE",
0140 "OUT_STATUS_PHASE",
0141 "IN_FAKE_STATUS_PHASE",
0142 "SHUTDOWN",
0143 };
0144
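/**
 * struct iudma_ch_cfg - Static configuration of an IUDMA channel.
 * @ep_num: USB endpoint number (-1 for the dedicated ep0 RX channel).
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries allocated to this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */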
0155 struct iudma_ch_cfg {
0156 int ep_num;
0157 int n_bds;
0158 int ep_type;
0159 int dir;
0160 int n_fifo_slots;
0161 int max_pkt_hs;
0162 int max_pkt_fs;
0163 };
0164
0165 static const struct iudma_ch_cfg iudma_defaults[] = {
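/*
 * Channel map: even channels are RX (OUT), odd channels are TX (IN).
 * Channels 0 and 1 are dedicated to ep0; channel 0 has no endpoint number
 * of its own, hence the -1.
 *
 *     ep_num  n_bds  ep_type     dir        fifo  pkt_hs  pkt_fs
 */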
0176 [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
0177 [1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
0178 [2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
0179 [3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
0180 [4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
0181 [5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
0182 };
0183
0184 struct bcm63xx_udc;
0185
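/**
 * struct iudma_ch - Current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number; -1 for the ep0 RX channel.
 * @enabled: Whether bcm63xx_ep_enable() has been called (the two ep0
 *	channels are always enabled).
 * @max_pkt: Maximum packet size for the current link speed.
 * @is_tx: true for TX (IN) channels; false for RX (OUT) channels.
 * @bep: Pointer to the associated endpoint; NULL for the ep0 RX channel.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next buffer descriptor available for a new fragment.
 * @end_bd: Final buffer descriptor in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of @bd_ring.
 * @n_bds: Total number of BDs in the ring.
 */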
0210 struct iudma_ch {
0211 unsigned int ch_idx;
0212 int ep_num;
0213 bool enabled;
0214 int max_pkt;
0215 bool is_tx;
0216 struct bcm63xx_ep *bep;
0217 struct bcm63xx_udc *udc;
0218
0219 struct bcm_enet_desc *read_bd;
0220 struct bcm_enet_desc *write_bd;
0221 struct bcm_enet_desc *end_bd;
0222 int n_bds_used;
0223
0224 struct bcm_enet_desc *bd_ring;
0225 dma_addr_t bd_ring_dma;
0226 unsigned int n_bds;
0227 };
0228
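/**
 * struct bcm63xx_ep - Driver state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to the IUDMA channel backing this endpoint.
 * @ep: USB gadget layer representation of the endpoint.
 * @udc: Reference to the device controller.
 * @queue: List of outstanding requests for this endpoint.
 * @halted: 1 if the endpoint is stalled; 0 otherwise.
 */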
0238 struct bcm63xx_ep {
0239 unsigned int ep_num;
0240 struct iudma_ch *iudma;
0241 struct usb_ep ep;
0242 struct bcm63xx_udc *udc;
0243 struct list_head queue;
0244 unsigned halted:1;
0245 };
0246
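/**
 * struct bcm63xx_req - Driver state of a single request.
 * @queue: Links back to the endpoint's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes covered by outstanding BD entries.
 * @iudma: IUDMA channel used for this request.
 */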
0255 struct bcm63xx_req {
0256 struct list_head queue;
0257 struct usb_request req;
0258 unsigned int offset;
0259 unsigned int bd_bytes;
0260 struct iudma_ch *iudma;
0261 };
0262
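/**
 * struct bcm63xx_udc - Overall driver/device state.
 * @lock: Spinlock protecting the device state.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port information).
 * @usbd_clk: Clock for the USB device block.
 * @usbh_clk: Clock for the USB host block.
 * @gadget: USB gadget layer representation of the controller.
 * @driver: Currently bound gadget driver (NULL if none).
 * @usbd_regs: Base address of the USBD register block.
 * @iudma_regs: Base address of the USBD's IUDMA register block.
 * @bep: Per-endpoint driver state.
 * @iudma: Per-channel IUDMA state.
 * @cfg: USB configuration number, from SET_CONFIGURATION.
 * @iface: USB interface number, from SET_INTERFACE.
 * @alt_iface: USB alternate interface number, from SET_INTERFACE.
 * @ep0_ctrl_req: Internal request used for SETUP packets and 0-byte acks.
 * @ep0_ctrl_buf: Buffer backing @ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Work item that runs the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB bus reset seen; notify the ep0 worker.
 * @ep0_req_set_cfg: SET_CONFIGURATION seen; notify the ep0 worker.
 * @ep0_req_set_iface: SET_INTERFACE seen; notify the ep0 worker.
 * @ep0_req_shutdown: Shutdown requested; notify the ep0 worker.
 * @ep0_req_completed: ep0 request completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from the gadget driver.
 * @ep0_request: Outstanding ep0 request.
 */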
0292 struct bcm63xx_udc {
0293 spinlock_t lock;
0294
0295 struct device *dev;
0296 struct bcm63xx_usbd_platform_data *pd;
0297 struct clk *usbd_clk;
0298 struct clk *usbh_clk;
0299
0300 struct usb_gadget gadget;
0301 struct usb_gadget_driver *driver;
0302
0303 void __iomem *usbd_regs;
0304 void __iomem *iudma_regs;
0305
0306 struct bcm63xx_ep bep[BCM63XX_NUM_EP];
0307 struct iudma_ch iudma[BCM63XX_NUM_IUDMA];
0308
0309 int cfg;
0310 int iface;
0311 int alt_iface;
0312
0313 struct bcm63xx_req ep0_ctrl_req;
0314 u8 *ep0_ctrl_buf;
0315
0316 int ep0state;
0317 struct work_struct ep0_wq;
0318
0319 unsigned long wedgemap;
0320
0321 unsigned ep0_req_reset:1;
0322 unsigned ep0_req_set_cfg:1;
0323 unsigned ep0_req_set_iface:1;
0324 unsigned ep0_req_shutdown:1;
0325
0326 unsigned ep0_req_completed:1;
0327 struct usb_request *ep0_reply;
0328 struct usb_request *ep0_request;
0329 };
0330
0331 static const struct usb_ep_ops bcm63xx_udc_ep_ops;
0332
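/***********************************************************************
 * Convenience functions
 ***********************************************************************/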
0337 static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
0338 {
0339 return container_of(g, struct bcm63xx_udc, gadget);
0340 }
0341
0342 static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
0343 {
0344 return container_of(ep, struct bcm63xx_ep, ep);
0345 }
0346
0347 static inline struct bcm63xx_req *our_req(struct usb_request *req)
0348 {
0349 return container_of(req, struct bcm63xx_req, req);
0350 }
0351
0352 static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
0353 {
0354 return bcm_readl(udc->usbd_regs + off);
0355 }
0356
0357 static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
0358 {
0359 bcm_writel(val, udc->usbd_regs + off);
0360 }
0361
0362 static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
0363 {
0364 return bcm_readl(udc->iudma_regs + off);
0365 }
0366
0367 static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
0368 {
0369 bcm_writel(val, udc->iudma_regs + off);
0370 }
0371
0372 static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
0373 {
0374 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
0375 (ENETDMA_CHAN_WIDTH * chan));
0376 }
0377
0378 static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
0379 int chan)
0380 {
0381 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
0382 (ENETDMA_CHAN_WIDTH * chan));
0383 }
0384
0385 static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
0386 {
0387 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
0388 (ENETDMA_CHAN_WIDTH * chan));
0389 }
0390
0391 static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
0392 int chan)
0393 {
0394 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
0395 (ENETDMA_CHAN_WIDTH * chan));
0396 }
0397
0398 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
0399 {
0400 if (is_enabled) {
0401 clk_enable(udc->usbh_clk);
0402 clk_enable(udc->usbd_clk);
0403 udelay(10);
0404 } else {
0405 clk_disable(udc->usbd_clk);
0406 clk_disable(udc->usbh_clk);
0407 }
0408 }
0409
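/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Select which endpoint/channel to configure.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The init_sel field in USBD_CONTROL_REG selects which endpoint (or FIFO
 * pair) the subsequent FIFO/typemap register accesses apply to.
 */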
0423 static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
0424 {
0425 u32 val = usbd_readl(udc, USBD_CONTROL_REG);
0426
0427 val &= ~USBD_CONTROL_INIT_SEL_MASK;
0428 val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
0429 usbd_writel(udc, val, USBD_CONTROL_REG);
0430 }
0431
0441 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
0442 bool is_stalled)
0443 {
0444 u32 val;
0445
0446 val = USBD_STALL_UPDATE_MASK |
0447 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
0448 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
0449 usbd_writel(udc, val, USBD_STALL_REG);
0450 }
0451
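/**
 * bcm63xx_fifo_setup - Configure FIFO allocation and endpoint packet sizes.
 * @udc: Reference to the device controller.
 *
 * Assigns a contiguous block of FIFO slots to each RX/TX channel pair and
 * programs the per-endpoint maximum packet size for the current link speed.
 */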
0459 static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
0460 {
0461 int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
0462 u32 i, val, rx_fifo_slot, tx_fifo_slot;
0463
0464
0465 rx_fifo_slot = tx_fifo_slot = 0;
0466 for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
0467 const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
0468 const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
0469
0470 bcm63xx_ep_dma_select(udc, i >> 1);
0471
0472 val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
0473 ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
0474 USBD_RXFIFO_CONFIG_END_SHIFT);
0475 rx_fifo_slot += rx_cfg->n_fifo_slots;
0476 usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
0477 usbd_writel(udc,
0478 is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
0479 USBD_RXFIFO_EPSIZE_REG);
0480
0481 val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
0482 ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
0483 USBD_TXFIFO_CONFIG_END_SHIFT);
0484 tx_fifo_slot += tx_cfg->n_fifo_slots;
0485 usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
0486 usbd_writel(udc,
0487 is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
0488 USBD_TXFIFO_EPSIZE_REG);
0489
0490 usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
0491 }
0492 }
0493
0499 static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
0500 {
0501 u32 val;
0502
0503 bcm63xx_ep_dma_select(udc, ep_num);
0504
0505 val = usbd_readl(udc, USBD_CONTROL_REG);
0506 val |= USBD_CONTROL_FIFO_RESET_MASK;
0507 usbd_writel(udc, val, USBD_CONTROL_REG);
0508 usbd_readl(udc, USBD_CONTROL_REG);
0509 }
0510
0515 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
0516 {
0517 int i;
0518
0519 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
0520 bcm63xx_fifo_reset_ep(udc, i);
0521 }
0522
0527 static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
0528 {
0529 u32 i, val;
0530
0531 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
0532 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
0533
0534 if (cfg->ep_num < 0)
0535 continue;
0536
0537 bcm63xx_ep_dma_select(udc, cfg->ep_num);
0538 val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
0539 ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
0540 usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
0541 }
0542 }
0543
0550 static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
0551 {
0552 u32 val, i;
0553
0554 usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
0555
0556 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
0557 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
0558 int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
0559 cfg->max_pkt_hs : cfg->max_pkt_fs;
0560 int idx = cfg->ep_num;
0561
0562 udc->iudma[i].max_pkt = max_pkt;
0563
0564 if (idx < 0)
0565 continue;
0566 usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
0567
0568 val = (idx << USBD_CSR_EP_LOG_SHIFT) |
0569 (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
0570 (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
0571 (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
0572 (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
0573 (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
0574 (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
0575 usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
0576 }
0577 }
0578
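/**
 * iudma_write - Queue a single request on an IUDMA channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the data to transfer.
 *
 * Breaks the request up into IUDMA_MAX_FRAGMENT-sized buffer descriptors
 * (or max_pkt-sized descriptors for non-coalesced RX) and hands them to the
 * hardware.  For a TX request with req.zero set and a length that is an
 * exact multiple of max_pkt, an extra zero-length descriptor is appended.
 */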
0592 static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
0593 struct bcm63xx_req *breq)
0594 {
0595 int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
0596 unsigned int bytes_left = breq->req.length - breq->offset;
0597 const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
0598 iudma->max_pkt : IUDMA_MAX_FRAGMENT;
0599
0600 iudma->n_bds_used = 0;
0601 breq->bd_bytes = 0;
0602 breq->iudma = iudma;
0603
0604 if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
0605 extra_zero_pkt = 1;
0606
0607 do {
0608 struct bcm_enet_desc *d = iudma->write_bd;
0609 u32 dmaflags = 0;
0610 unsigned int n_bytes;
0611
0612 if (d == iudma->end_bd) {
0613 dmaflags |= DMADESC_WRAP_MASK;
0614 iudma->write_bd = iudma->bd_ring;
0615 } else {
0616 iudma->write_bd++;
0617 }
0618 iudma->n_bds_used++;
0619
0620 n_bytes = min_t(int, bytes_left, max_bd_bytes);
0621 if (n_bytes)
0622 dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
0623 else
0624 dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
0625 DMADESC_USB_ZERO_MASK;
0626
0627 dmaflags |= DMADESC_OWNER_MASK;
0628 if (first_bd) {
0629 dmaflags |= DMADESC_SOP_MASK;
0630 first_bd = 0;
0631 }
0632
/*
 * extra_zero_pkt forces one more pass through the loop after all of the
 * data has been queued, so that the trailing zero-length packet gets its
 * own descriptor.
 */
0637 if (extra_zero_pkt && !bytes_left)
0638 extra_zero_pkt = 0;
0639
0640 if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
0641 (n_bytes == bytes_left && !extra_zero_pkt)) {
0642 last_bd = 1;
0643 dmaflags |= DMADESC_EOP_MASK;
0644 }
0645
0646 d->address = breq->req.dma + breq->offset;
0647 mb();
0648 d->len_stat = dmaflags;
0649
0650 breq->offset += n_bytes;
0651 breq->bd_bytes += n_bytes;
0652 bytes_left -= n_bytes;
0653 } while (!last_bd);
0654
0655 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
0656 ENETDMAC_CHANCFG_REG, iudma->ch_idx);
0657 }
0658
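/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * Returns the number of bytes completed, -EBUSY if the hardware still owns
 * one of the outstanding descriptors, or -EINVAL if nothing was queued.
 */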
0668 static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
0669 {
0670 int i, actual_len = 0;
0671 struct bcm_enet_desc *d = iudma->read_bd;
0672
0673 if (!iudma->n_bds_used)
0674 return -EINVAL;
0675
0676 for (i = 0; i < iudma->n_bds_used; i++) {
0677 u32 dmaflags;
0678
0679 dmaflags = d->len_stat;
0680
0681 if (dmaflags & DMADESC_OWNER_MASK)
0682 return -EBUSY;
0683
0684 actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
0685 DMADESC_LENGTH_SHIFT;
0686 if (d == iudma->end_bd)
0687 d = iudma->bd_ring;
0688 else
0689 d++;
0690 }
0691
0692 iudma->read_bd = d;
0693 iudma->n_bds_used = 0;
0694 return actual_len;
0695 }
0696
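/**
 * iudma_reset_channel - Stop and reinitialize an IUDMA channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 *
 * Halts the channel (forcibly if it does not stop in time), clears the
 * descriptor ring, and reprograms the channel's interrupt mask, burst size,
 * and BD ring base address.
 */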
0702 static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
0703 {
0704 int timeout = IUDMA_RESET_TIMEOUT_US;
0705 struct bcm_enet_desc *d;
0706 int ch_idx = iudma->ch_idx;
0707
0708 if (!iudma->is_tx)
0709 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
0710
/* stop the channel, then wait for it to actually halt */
0712 usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
0713
0714 while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
0715 ENETDMAC_CHANCFG_EN_MASK) {
0716 udelay(1);
0717
0718
0719 if (iudma->is_tx && iudma->ep_num >= 0)
0720 bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
0721
0722 if (!timeout--) {
0723 dev_err(udc->dev, "can't reset IUDMA channel %d\n",
0724 ch_idx);
0725 break;
0726 }
0727 if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
0728 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
0729 ch_idx);
0730 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
0731 ENETDMAC_CHANCFG_REG, ch_idx);
0732 }
0733 }
0734 usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
0735
0736
0737 for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
0738 d->len_stat = 0;
0739 mb();
0740
0741 iudma->read_bd = iudma->write_bd = iudma->bd_ring;
0742 iudma->n_bds_used = 0;
0743
0744
0745 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
0746 ENETDMAC_IRMASK_REG, ch_idx);
0747 usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
0748
0749 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
0750 usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
0751 }
0752
0758 static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
0759 {
0760 struct iudma_ch *iudma = &udc->iudma[ch_idx];
0761 const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
0762 unsigned int n_bds = cfg->n_bds;
0763 struct bcm63xx_ep *bep = NULL;
0764
0765 iudma->ep_num = cfg->ep_num;
0766 iudma->ch_idx = ch_idx;
0767 iudma->is_tx = !!(ch_idx & 0x01);
0768 if (iudma->ep_num >= 0) {
0769 bep = &udc->bep[iudma->ep_num];
0770 bep->iudma = iudma;
0771 INIT_LIST_HEAD(&bep->queue);
0772 }
0773
0774 iudma->bep = bep;
0775 iudma->udc = udc;
0776
/* the two ep0 channels are always active; others are enabled on demand */
0778 if (iudma->ep_num <= 0)
0779 iudma->enabled = true;
0780
0781 iudma->n_bds = n_bds;
0782 iudma->bd_ring = dmam_alloc_coherent(udc->dev,
0783 n_bds * sizeof(struct bcm_enet_desc),
0784 &iudma->bd_ring_dma, GFP_KERNEL);
0785 if (!iudma->bd_ring)
0786 return -ENOMEM;
0787 iudma->end_bd = &iudma->bd_ring[n_bds - 1];
0788
0789 return 0;
0790 }
0791
0798 static int iudma_init(struct bcm63xx_udc *udc)
0799 {
0800 int i, rc;
0801
0802 usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
0803
0804 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
0805 rc = iudma_init_channel(udc, i);
0806 if (rc)
0807 return rc;
0808 iudma_reset_channel(udc, &udc->iudma[i]);
0809 }
0810
0811 usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
0812 return 0;
0813 }
0814
0821 static void iudma_uninit(struct bcm63xx_udc *udc)
0822 {
0823 int i;
0824
0825 usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
0826
0827 for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
0828 iudma_reset_channel(udc, &udc->iudma[i]);
0829
0830 usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
0831 }
0832
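/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control-path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */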
0842 static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
0843 {
0844 u32 val;
0845
0846 usbd_writel(udc, 0, USBD_STATUS_REG);
0847
0848 val = BIT(USBD_EVENT_IRQ_USB_RESET) |
0849 BIT(USBD_EVENT_IRQ_SETUP) |
0850 BIT(USBD_EVENT_IRQ_SETCFG) |
0851 BIT(USBD_EVENT_IRQ_SETINTF) |
0852 BIT(USBD_EVENT_IRQ_USB_LINK);
0853 usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
0854 usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
0855 }
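
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device mode; false for host mode.
 *
 * The UTMI PHY (and, on 6328, the pinmux) is shared with the host
 * controller, so this is only expected to change while disconnected.
 */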
0868 static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
0869 {
0870 u32 val, portmask = BIT(udc->pd->port_no);
0871
0872 if (BCMCPU_IS_6328()) {
0873
0874 val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
0875 val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
0876 val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
0877 GPIO_PINMUX_OTHR_6328_USB_HOST;
0878 bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
0879 }
0880
0881 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
0882 if (is_device) {
0883 val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
0884 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
0885 } else {
0886 val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
0887 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
0888 }
0889 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
0890
0891 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
0892 if (is_device)
0893 val |= USBH_PRIV_SWAP_USBD_MASK;
0894 else
0895 val &= ~USBH_PRIV_SWAP_USBD_MASK;
0896 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
0897 }
0898
0908 static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
0909 {
0910 u32 val, portmask = BIT(udc->pd->port_no);
0911
0912 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
0913 if (is_on)
0914 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
0915 else
0916 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
0917 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
0918 }
0919
0927 static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
0928 {
0929 set_clocks(udc, true);
0930 iudma_uninit(udc);
0931 set_clocks(udc, false);
0932
0933 clk_put(udc->usbd_clk);
0934 clk_put(udc->usbh_clk);
0935 }
0936
0941 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
0942 {
0943 int i, rc = 0;
0944 u32 val;
0945
0946 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
0947 GFP_KERNEL);
0948 if (!udc->ep0_ctrl_buf)
0949 return -ENOMEM;
0950
0951 INIT_LIST_HEAD(&udc->gadget.ep_list);
0952 for (i = 0; i < BCM63XX_NUM_EP; i++) {
0953 struct bcm63xx_ep *bep = &udc->bep[i];
0954
0955 bep->ep.name = bcm63xx_ep_info[i].name;
0956 bep->ep.caps = bcm63xx_ep_info[i].caps;
0957 bep->ep_num = i;
0958 bep->ep.ops = &bcm63xx_udc_ep_ops;
0959 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
0960 bep->halted = 0;
0961 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
0962 bep->udc = udc;
0963 bep->ep.desc = NULL;
0964 INIT_LIST_HEAD(&bep->queue);
0965 }
0966
0967 udc->gadget.ep0 = &udc->bep[0].ep;
0968 list_del(&udc->bep[0].ep.ep_list);
0969
0970 udc->gadget.speed = USB_SPEED_UNKNOWN;
0971 udc->ep0state = EP0_SHUTDOWN;
0972
0973 udc->usbh_clk = clk_get(udc->dev, "usbh");
0974 if (IS_ERR(udc->usbh_clk))
0975 return -EIO;
0976
0977 udc->usbd_clk = clk_get(udc->dev, "usbd");
0978 if (IS_ERR(udc->usbd_clk)) {
0979 clk_put(udc->usbh_clk);
0980 return -EIO;
0981 }
0982
0983 set_clocks(udc, true);
0984
0985 val = USBD_CONTROL_AUTO_CSRS_MASK |
0986 USBD_CONTROL_DONE_CSRS_MASK |
0987 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
0988 usbd_writel(udc, val, USBD_CONTROL_REG);
0989
0990 val = USBD_STRAPS_APP_SELF_PWR_MASK |
0991 USBD_STRAPS_APP_RAM_IF_MASK |
0992 USBD_STRAPS_APP_CSRPRGSUP_MASK |
0993 USBD_STRAPS_APP_8BITPHY_MASK |
0994 USBD_STRAPS_APP_RMTWKUP_MASK;
0995
0996 if (udc->gadget.max_speed == USB_SPEED_HIGH)
0997 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
0998 else
0999 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1000 usbd_writel(udc, val, USBD_STRAPS_REG);
1001
1002 bcm63xx_set_ctrl_irqs(udc, false);
1003
1004 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1005
1006 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1007 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1008 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1009
1010 rc = iudma_init(udc);
1011 set_clocks(udc, false);
1012 if (rc)
1013 bcm63xx_uninit_udc_hw(udc);
1014
return rc;
1016 }
1017
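/***********************************************************************
 * USB gadget endpoint operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contents of the USB endpoint descriptor.
 *
 * ep0 is not enabled through this path; it is always active.
 */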
1030 static int bcm63xx_ep_enable(struct usb_ep *ep,
1031 const struct usb_endpoint_descriptor *desc)
1032 {
1033 struct bcm63xx_ep *bep = our_ep(ep);
1034 struct bcm63xx_udc *udc = bep->udc;
1035 struct iudma_ch *iudma = bep->iudma;
1036 unsigned long flags;
1037
1038 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1039 return -EINVAL;
1040
1041 if (!udc->driver)
1042 return -ESHUTDOWN;
1043
1044 spin_lock_irqsave(&udc->lock, flags);
1045 if (iudma->enabled) {
1046 spin_unlock_irqrestore(&udc->lock, flags);
1047 return -EINVAL;
1048 }
1049
1050 iudma->enabled = true;
1051 BUG_ON(!list_empty(&bep->queue));
1052
1053 iudma_reset_channel(udc, iudma);
1054
1055 bep->halted = 0;
1056 bcm63xx_set_stall(udc, bep, false);
1057 clear_bit(bep->ep_num, &udc->wedgemap);
1058
1059 ep->desc = desc;
1060 ep->maxpacket = usb_endpoint_maxp(desc);
1061
1062 spin_unlock_irqrestore(&udc->lock, flags);
1063 return 0;
1064 }
1065
1070 static int bcm63xx_ep_disable(struct usb_ep *ep)
1071 {
1072 struct bcm63xx_ep *bep = our_ep(ep);
1073 struct bcm63xx_udc *udc = bep->udc;
1074 struct iudma_ch *iudma = bep->iudma;
1075 struct bcm63xx_req *breq, *n;
1076 unsigned long flags;
1077
1078 if (!ep || !ep->desc)
1079 return -EINVAL;
1080
1081 spin_lock_irqsave(&udc->lock, flags);
1082 if (!iudma->enabled) {
1083 spin_unlock_irqrestore(&udc->lock, flags);
1084 return -EINVAL;
1085 }
1086 iudma->enabled = false;
1087
1088 iudma_reset_channel(udc, iudma);
1089
1090 if (!list_empty(&bep->queue)) {
1091 list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1092 usb_gadget_unmap_request(&udc->gadget, &breq->req,
1093 iudma->is_tx);
1094 list_del(&breq->queue);
1095 breq->req.status = -ESHUTDOWN;
1096
1097 spin_unlock_irqrestore(&udc->lock, flags);
1098 usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1099 spin_lock_irqsave(&udc->lock, flags);
1100 }
1101 }
1102 ep->desc = NULL;
1103
1104 spin_unlock_irqrestore(&udc->lock, flags);
1105 return 0;
1106 }
1107
1113 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1114 gfp_t mem_flags)
1115 {
1116 struct bcm63xx_req *breq;
1117
1118 breq = kzalloc(sizeof(*breq), mem_flags);
1119 if (!breq)
1120 return NULL;
1121 return &breq->req;
1122 }
1123
1129 static void bcm63xx_udc_free_request(struct usb_ep *ep,
1130 struct usb_request *req)
1131 {
1132 struct bcm63xx_req *breq = our_req(req);
1133 kfree(breq);
1134 }
1135
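/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * ep0 replies are not submitted to IUDMA here; they are stashed in
 * udc->ep0_reply and handed off to the ep0 state machine worker.  All
 * other requests go straight onto the endpoint's IUDMA queue.
 */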
1150 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1151 gfp_t mem_flags)
1152 {
1153 struct bcm63xx_ep *bep = our_ep(ep);
1154 struct bcm63xx_udc *udc = bep->udc;
1155 struct bcm63xx_req *breq = our_req(req);
1156 unsigned long flags;
1157 int rc = 0;
1158
1159 if (unlikely(!req || !req->complete || !req->buf || !ep))
1160 return -EINVAL;
1161
1162 req->actual = 0;
1163 req->status = 0;
1164 breq->offset = 0;
1165
1166 if (bep == &udc->bep[0]) {
/* only one outstanding ep0 reply at a time */
1168 if (udc->ep0_reply)
1169 return -EINVAL;
1170
1171 udc->ep0_reply = req;
1172 schedule_work(&udc->ep0_wq);
1173 return 0;
1174 }
1175
1176 spin_lock_irqsave(&udc->lock, flags);
1177 if (!bep->iudma->enabled) {
1178 rc = -ESHUTDOWN;
1179 goto out;
1180 }
1181
1182 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1183 if (rc == 0) {
1184 list_add_tail(&breq->queue, &bep->queue);
1185 if (list_is_singular(&bep->queue))
1186 iudma_write(udc, bep->iudma, breq);
1187 }
1188
1189 out:
1190 spin_unlock_irqrestore(&udc->lock, flags);
1191 return rc;
1192 }
1193
1203 static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1204 {
1205 struct bcm63xx_ep *bep = our_ep(ep);
1206 struct bcm63xx_udc *udc = bep->udc;
1207 struct bcm63xx_req *breq = our_req(req), *cur;
1208 unsigned long flags;
1209 int rc = 0;
1210
1211 spin_lock_irqsave(&udc->lock, flags);
1212 if (list_empty(&bep->queue)) {
1213 rc = -EINVAL;
1214 goto out;
1215 }
1216
1217 cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1218 usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1219
1220 if (breq == cur) {
1221 iudma_reset_channel(udc, bep->iudma);
1222 list_del(&breq->queue);
1223
1224 if (!list_empty(&bep->queue)) {
1225 struct bcm63xx_req *next;
1226
1227 next = list_first_entry(&bep->queue,
1228 struct bcm63xx_req, queue);
1229 iudma_write(udc, bep->iudma, next);
1230 }
1231 } else {
1232 list_del(&breq->queue);
1233 }
1234
1235 out:
1236 spin_unlock_irqrestore(&udc->lock, flags);
1237
1238 req->status = -ESHUTDOWN;
1239 req->complete(ep, req);
1240
1241 return rc;
1242 }
1243
1251 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1252 {
1253 struct bcm63xx_ep *bep = our_ep(ep);
1254 struct bcm63xx_udc *udc = bep->udc;
1255 unsigned long flags;
1256
1257 spin_lock_irqsave(&udc->lock, flags);
1258 bcm63xx_set_stall(udc, bep, !!value);
1259 bep->halted = value;
1260 spin_unlock_irqrestore(&udc->lock, flags);
1261
1262 return 0;
1263 }
1264
1271 static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1272 {
1273 struct bcm63xx_ep *bep = our_ep(ep);
1274 struct bcm63xx_udc *udc = bep->udc;
1275 unsigned long flags;
1276
1277 spin_lock_irqsave(&udc->lock, flags);
1278 set_bit(bep->ep_num, &udc->wedgemap);
1279 bcm63xx_set_stall(udc, bep, true);
1280 spin_unlock_irqrestore(&udc->lock, flags);
1281
1282 return 0;
1283 }
1284
1285 static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1286 .enable = bcm63xx_ep_enable,
1287 .disable = bcm63xx_ep_disable,
1288
1289 .alloc_request = bcm63xx_udc_alloc_request,
1290 .free_request = bcm63xx_udc_free_request,
1291
1292 .queue = bcm63xx_udc_queue,
1293 .dequeue = bcm63xx_udc_dequeue,
1294
1295 .set_halt = bcm63xx_udc_set_halt,
1296 .set_wedge = bcm63xx_udc_set_wedge,
1297 };
1298
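/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop the lock and pass a SETUP to the gadget.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */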
1308 static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1309 struct usb_ctrlrequest *ctrl)
1310 {
1311 int rc;
1312
1313 spin_unlock_irq(&udc->lock);
1314 rc = udc->driver->setup(&udc->gadget, ctrl);
1315 spin_lock_irq(&udc->lock);
1316 return rc;
1317 }
1318
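/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION callback.
 * @udc: Reference to the device controller.
 *
 * The hardware handles (and acknowledges) SET_CONFIGURATION itself, so the
 * gadget driver only finds out about it through this spoofed callback.
 */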
1331 static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1332 {
1333 struct usb_ctrlrequest ctrl;
1334 int rc;
1335
1336 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1337 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1338 ctrl.wValue = cpu_to_le16(udc->cfg);
1339 ctrl.wIndex = 0;
1340 ctrl.wLength = 0;
1341
1342 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1343 if (rc < 0) {
1344 dev_warn_ratelimited(udc->dev,
1345 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1346 udc->cfg);
1347 }
1348 return rc;
1349 }
1350
1355 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1356 {
1357 struct usb_ctrlrequest ctrl;
1358 int rc;
1359
1360 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1361 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1362 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1363 ctrl.wIndex = cpu_to_le16(udc->iface);
1364 ctrl.wLength = 0;
1365
1366 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1367 if (rc < 0) {
1368 dev_warn_ratelimited(udc->dev,
1369 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1370 udc->iface, udc->alt_iface);
1371 }
1372 return rc;
1373 }
1374
1381 static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1382 struct usb_request *req)
1383 {
1384 struct bcm63xx_req *breq = our_req(req);
1385 struct iudma_ch *iudma = &udc->iudma[ch_idx];
1386
1387 BUG_ON(udc->ep0_request);
1388 udc->ep0_request = req;
1389
1390 req->actual = 0;
1391 breq->offset = 0;
1392 usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1393 iudma_write(udc, iudma, breq);
1394 }
1395
1402 static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1403 struct usb_request *req, int status)
1404 {
1405 req->status = status;
1406 if (status)
1407 req->actual = 0;
1408 if (req->complete) {
1409 spin_unlock_irq(&udc->lock);
1410 req->complete(&udc->bep[0].ep, req);
1411 spin_lock_irq(&udc->lock);
1412 }
1413 }
1414
1421 static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1422 {
1423 struct usb_request *req = udc->ep0_reply;
1424
1425 udc->ep0_reply = NULL;
1426 usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1427 if (udc->ep0_request == req) {
1428 udc->ep0_req_completed = 0;
1429 udc->ep0_request = NULL;
1430 }
1431 bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1432 }
1433
1439 static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1440 {
1441 struct usb_request *req = udc->ep0_request;
1442
1443 udc->ep0_req_completed = 0;
1444 udc->ep0_request = NULL;
1445
1446 return req->actual;
1447 }
1448
1458 static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1459 int length)
1460 {
1461 struct usb_request *req = &udc->ep0_ctrl_req.req;
1462
1463 req->buf = udc->ep0_ctrl_buf;
1464 req->length = length;
1465 req->complete = NULL;
1466
1467 bcm63xx_ep0_map_write(udc, ch_idx, req);
1468 }
1469
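/**
 * bcm63xx_ep0_do_setup - Parse a new SETUP packet arriving on ep0.
 * @udc: Reference to the device controller.
 *
 * Returns the next ep0 state: a data phase if wLength is nonzero,
 * otherwise back to EP0_REQUEUE.
 */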
1478 static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1479 {
1480 int rc;
1481 struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1482
1483 rc = bcm63xx_ep0_read_complete(udc);
1484
1485 if (rc < 0) {
1486 dev_err(udc->dev, "missing SETUP packet\n");
1487 return EP0_IDLE;
1488 }
1489
/*
 * A zero-length completion here appears to be a stray 0-byte status ack
 * rather than a real SETUP packet; ignore it and requeue.
 */
1495 if (rc == 0)
1496 return EP0_REQUEUE;
1497
/* drop malformed SETUP packets */
1499 if (rc != sizeof(*ctrl)) {
1500 dev_warn_ratelimited(udc->dev,
1501 "malformed SETUP packet (%d bytes)\n", rc);
1502 return EP0_REQUEUE;
1503 }
1504
/* pass the SETUP request up to the gadget driver */
1506 rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1507 if (rc < 0) {
1508 bcm63xx_set_stall(udc, &udc->bep[0], true);
1509 return EP0_REQUEUE;
1510 }
1511
1512 if (!ctrl->wLength)
1513 return EP0_REQUEUE;
1514 else if (ctrl->bRequestType & USB_DIR_IN)
1515 return EP0_IN_DATA_PHASE_SETUP;
1516 else
1517 return EP0_OUT_DATA_PHASE_SETUP;
1518 }
1519
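/**
 * bcm63xx_ep0_do_idle - Check for outstanding work while ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * Handles the event flags set by the ISR (reset, set_cfg, set_iface,
 * shutdown) plus any completed SETUP read.  Returns 0 if work was done,
 * or -EAGAIN if there is nothing to do.
 */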
1531 static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1532 {
1533 if (udc->ep0_req_reset) {
1534 udc->ep0_req_reset = 0;
1535 } else if (udc->ep0_req_set_cfg) {
1536 udc->ep0_req_set_cfg = 0;
1537 if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1538 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1539 } else if (udc->ep0_req_set_iface) {
1540 udc->ep0_req_set_iface = 0;
1541 if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1542 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1543 } else if (udc->ep0_req_completed) {
1544 udc->ep0state = bcm63xx_ep0_do_setup(udc);
1545 return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1546 } else if (udc->ep0_req_shutdown) {
1547 udc->ep0_req_shutdown = 0;
1548 udc->ep0_req_completed = 0;
1549 udc->ep0_request = NULL;
1550 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1551 usb_gadget_unmap_request(&udc->gadget,
1552 &udc->ep0_ctrl_req.req, 0);
1553
1554
1555 mb();
1556 udc->ep0state = EP0_SHUTDOWN;
1557 } else if (udc->ep0_reply) {
/*
 * An unexpected reply can be left over if a USB reset arrived in the
 * middle of an ep0 transaction; complete it with -ESHUTDOWN and move on.
 */
1563 dev_warn(udc->dev, "nuking unexpected reply\n");
1564 bcm63xx_ep0_nuke_reply(udc, 0);
1565 } else {
1566 return -EAGAIN;
1567 }
1568
1569 return 0;
1570 }
1571
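/**
 * bcm63xx_ep0_one_round - Run the ep0 state machine for one iteration.
 * @udc: Reference to the device controller.
 *
 * Returns 0 when the state changed, or -EAGAIN when it did not (the caller
 * should stop looping).
 */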
1578 static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1579 {
1580 enum bcm63xx_ep0_state ep0state = udc->ep0state;
1581 bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1582
1583 switch (udc->ep0state) {
1584 case EP0_REQUEUE:
/* queue a buffer to receive the next SETUP packet */
1586 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1587 BCM63XX_MAX_CTRL_PKT);
1588 ep0state = EP0_IDLE;
1589 break;
1590 case EP0_IDLE:
1591 return bcm63xx_ep0_do_idle(udc);
1592 case EP0_IN_DATA_PHASE_SETUP:
/*
 * Normal case: the gadget driver's IN-data reply is (or soon will be)
 * sitting in ep0_reply; once it shows up, hand it to the ep0 TX channel
 * and wait for completion.
 *
 * Reset/shutdown case: abandon the transaction and go back to waiting
 * for a new SETUP packet.
 */
1602 if (udc->ep0_reply) {
1603 bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1604 udc->ep0_reply);
1605 ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1606 } else if (shutdown) {
1607 ep0state = EP0_REQUEUE;
1608 }
1609 break;
1610 case EP0_IN_DATA_PHASE_COMPLETE: {
/*
 * Normal case: the IN-data reply is in flight; when it completes, return
 * to REQUEUE -> IDLE.
 *
 * Reset/shutdown case: stop waiting, cancel the TX channel, and drop the
 * reply.
 */
1618 if (udc->ep0_req_completed) {
1619 udc->ep0_reply = NULL;
1620 bcm63xx_ep0_read_complete(udc);
1625 ep0state = EP0_REQUEUE;
1626 } else if (shutdown) {
1627 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1628 bcm63xx_ep0_nuke_reply(udc, 1);
1629 ep0state = EP0_REQUEUE;
1630 }
1631 break;
1632 }
1633 case EP0_OUT_DATA_PHASE_SETUP:
1634
1635 if (udc->ep0_reply) {
1636 bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1637 udc->ep0_reply);
1638 ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1639 } else if (shutdown) {
1640 ep0state = EP0_REQUEUE;
1641 }
1642 break;
1643 case EP0_OUT_DATA_PHASE_COMPLETE: {
1644
1645 if (udc->ep0_req_completed) {
1646 udc->ep0_reply = NULL;
1647 bcm63xx_ep0_read_complete(udc);
1648
/* send a 0-byte IN packet to finish the status phase */
1650 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1651 ep0state = EP0_OUT_STATUS_PHASE;
1652 } else if (shutdown) {
1653 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1654 bcm63xx_ep0_nuke_reply(udc, 0);
1655 ep0state = EP0_REQUEUE;
1656 }
1657 break;
1658 }
1659 case EP0_OUT_STATUS_PHASE:
/*
 * Normal case: the 0-byte status ack is in flight; when it completes,
 * return to REQUEUE -> IDLE.
 *
 * Reset/shutdown case: just cancel the transmission; the internal request
 * has no completion callback to invoke.
 */
1668 if (udc->ep0_req_completed) {
1669 bcm63xx_ep0_read_complete(udc);
1670 ep0state = EP0_REQUEUE;
1671 } else if (shutdown) {
1672 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1673 udc->ep0_request = NULL;
1674 ep0state = EP0_REQUEUE;
1675 }
1676 break;
1677 case EP0_IN_FAKE_STATUS_PHASE: {
/*
 * The hardware has already acknowledged the spoofed SET_CONFIGURATION /
 * SET_INTERFACE on the bus, so the gadget driver's 0-byte status reply is
 * completed locally here and never handed to the hardware.
 *
 * Reset/shutdown case with no reply pending: just return to IDLE.
 */
1692 struct usb_request *r = udc->ep0_reply;
1693
1694 if (!r) {
1695 if (shutdown)
1696 ep0state = EP0_IDLE;
1697 break;
1698 }
1699
1700 bcm63xx_ep0_complete(udc, r, 0);
1701 udc->ep0_reply = NULL;
1702 ep0state = EP0_IDLE;
1703 break;
1704 }
1705 case EP0_SHUTDOWN:
1706 break;
1707 }
1708
1709 if (udc->ep0state == ep0state)
1710 return -EAGAIN;
1711
1712 udc->ep0state = ep0state;
1713 return 0;
1714 }
1715
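/**
 * bcm63xx_ep0_process - ep0 worker / state machine.
 * @w: Workqueue item.
 *
 * All ep0 activity is funneled through this single work item so that the
 * SETUP/DATA/STATUS sequencing happens in one place, outside IRQ context.
 * It keeps calling bcm63xx_ep0_one_round() until no further state
 * transitions are possible.
 */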
1730 static void bcm63xx_ep0_process(struct work_struct *w)
1731 {
1732 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1733 spin_lock_irq(&udc->lock);
1734 while (bcm63xx_ep0_one_round(udc) == 0)
1735 ;
1736 spin_unlock_irq(&udc->lock);
1737 }
1738
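/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/

/**
 * bcm63xx_udc_get_frame - Read the SOF frame number from the hardware.
 * @gadget: USB device.
 */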
1747 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1748 {
1749 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1750
1751 return (usbd_readl(udc, USBD_STATUS_REG) &
1752 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1753 }
1754
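/**
 * bcm63xx_udc_pullup - Enable/disable the pullup on D+.
 * @gadget: USB device.
 * @is_on: 0 to disable the pullup; 1 to enable it.
 *
 * Dropping the pullup also shuts down the ep0 state machine; this call
 * blocks until the worker has reached EP0_SHUTDOWN.
 */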
1762 static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1763 {
1764 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1765 unsigned long flags;
1766 int i, rc = -EINVAL;
1767
1768 spin_lock_irqsave(&udc->lock, flags);
1769 if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1770 udc->gadget.speed = USB_SPEED_UNKNOWN;
1771 udc->ep0state = EP0_REQUEUE;
1772 bcm63xx_fifo_setup(udc);
1773 bcm63xx_fifo_reset(udc);
1774 bcm63xx_ep_setup(udc);
1775
1776 bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1777 for (i = 0; i < BCM63XX_NUM_EP; i++)
1778 bcm63xx_set_stall(udc, &udc->bep[i], false);
1779
1780 bcm63xx_set_ctrl_irqs(udc, true);
1781 bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1782 rc = 0;
1783 } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1784 bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1785
1786 udc->ep0_req_shutdown = 1;
1787 spin_unlock_irqrestore(&udc->lock, flags);
1788
1789 while (1) {
1790 schedule_work(&udc->ep0_wq);
1791 if (udc->ep0state == EP0_SHUTDOWN)
1792 break;
1793 msleep(50);
1794 }
1795 bcm63xx_set_ctrl_irqs(udc, false);
1796 cancel_work_sync(&udc->ep0_wq);
1797 return 0;
1798 }
1799
1800 spin_unlock_irqrestore(&udc->lock, flags);
1801 return rc;
1802 }
1803
1809 static int bcm63xx_udc_start(struct usb_gadget *gadget,
1810 struct usb_gadget_driver *driver)
1811 {
1812 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1813 unsigned long flags;
1814
1815 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1816 !driver->setup)
1817 return -EINVAL;
1818 if (!udc)
1819 return -ENODEV;
1820 if (udc->driver)
1821 return -EBUSY;
1822
1823 spin_lock_irqsave(&udc->lock, flags);
1824
1825 set_clocks(udc, true);
1826 bcm63xx_fifo_setup(udc);
1827 bcm63xx_ep_init(udc);
1828 bcm63xx_ep_setup(udc);
1829 bcm63xx_fifo_reset(udc);
1830 bcm63xx_select_phy_mode(udc, true);
1831
1832 udc->driver = driver;
1833 driver->driver.bus = NULL;
1834 udc->gadget.dev.of_node = udc->dev->of_node;
1835
1836 spin_unlock_irqrestore(&udc->lock, flags);
1837
1838 return 0;
1839 }
1840
1846 static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1847 {
1848 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1849 unsigned long flags;
1850
1851 spin_lock_irqsave(&udc->lock, flags);
1852
1853 udc->driver = NULL;
1854
/*
 * Give the bus a moment to settle before handing the PHY back to host
 * mode; switching it too abruptly apparently confuses the host side.
 */
1861 msleep(100);
1862
1863 bcm63xx_select_phy_mode(udc, false);
1864 set_clocks(udc, false);
1865
1866 spin_unlock_irqrestore(&udc->lock, flags);
1867
1868 return 0;
1869 }
1870
1871 static const struct usb_gadget_ops bcm63xx_udc_ops = {
1872 .get_frame = bcm63xx_udc_get_frame,
1873 .pullup = bcm63xx_udc_pullup,
1874 .udc_start = bcm63xx_udc_start,
1875 .udc_stop = bcm63xx_udc_stop,
1876 };
1877
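/***********************************************************************
 * IRQ handling
 ***********************************************************************/

/**
 * bcm63xx_update_cfg_iface - Mirror configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * The hardware handles SET_CONFIGURATION and SET_INTERFACE itself; this
 * just reads back the resulting cfg/iface/alt_iface values from the status
 * register and reprograms the endpoint CSRs accordingly.
 */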
1891 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1892 {
1893 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1894
1895 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1896 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1897 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1898 USBD_STATUS_ALTINTF_SHIFT;
1899 bcm63xx_ep_setup(udc);
1900 }
1901
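/**
 * bcm63xx_update_link_speed - Check whether the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * Returns 1 if the speed changed (so the FIFO and endpoint setup must be
 * redone), 0 otherwise.
 */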
1909 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1910 {
1911 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1912 enum usb_device_speed oldspeed = udc->gadget.speed;
1913
1914 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1915 case BCM63XX_SPD_HIGH:
1916 udc->gadget.speed = USB_SPEED_HIGH;
1917 break;
1918 case BCM63XX_SPD_FULL:
1919 udc->gadget.speed = USB_SPEED_FULL;
1920 break;
1921 default:
1922
1923 udc->gadget.speed = USB_SPEED_UNKNOWN;
1924 dev_err(udc->dev,
1925 "received SETUP packet with invalid link speed\n");
1926 return 0;
1927 }
1928
1929 if (udc->gadget.speed != oldspeed) {
1930 dev_info(udc->dev, "link up, %s-speed mode\n",
1931 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1932 return 1;
1933 } else {
1934 return 0;
1935 }
1936 }
1937
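/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to re-apply the stall; false to clear it.
 *
 * On a SETUP packet, wedged endpoints are re-stalled; on a bus reset the
 * wedge bits are cleared along with the stall.
 */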
1949 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1950 {
1951 int i;
1952
1953 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1954 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1955 if (!new_status)
1956 clear_bit(i, &udc->wedgemap);
1957 }
1958 }
1959
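/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * Most state changes are relayed to the ep0 worker rather than handled
 * here directly.
 */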
1968 static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1969 {
1970 struct bcm63xx_udc *udc = dev_id;
1971 u32 stat;
1972 bool disconnected = false, bus_reset = false;
1973
1974 stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1975 usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1976
1977 usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1978
1979 spin_lock(&udc->lock);
1980 if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
/* link status changed; if the link bit dropped, report a disconnect */
1983 if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1984 USBD_EVENTS_USB_LINK_MASK) &&
1985 udc->gadget.speed != USB_SPEED_UNKNOWN)
1986 dev_info(udc->dev, "link down\n");
1987
1988 udc->gadget.speed = USB_SPEED_UNKNOWN;
1989 disconnected = true;
1990 }
1991 if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1992 bcm63xx_fifo_setup(udc);
1993 bcm63xx_fifo_reset(udc);
1994 bcm63xx_ep_setup(udc);
1995
1996 bcm63xx_update_wedge(udc, false);
1997
1998 udc->ep0_req_reset = 1;
1999 schedule_work(&udc->ep0_wq);
2000 bus_reset = true;
2001 }
2002 if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
2003 if (bcm63xx_update_link_speed(udc)) {
2004 bcm63xx_fifo_setup(udc);
2005 bcm63xx_ep_setup(udc);
2006 }
2007 bcm63xx_update_wedge(udc, true);
2008 }
2009 if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2010 bcm63xx_update_cfg_iface(udc);
2011 udc->ep0_req_set_cfg = 1;
2012 schedule_work(&udc->ep0_wq);
2013 }
2014 if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2015 bcm63xx_update_cfg_iface(udc);
2016 udc->ep0_req_set_iface = 1;
2017 schedule_work(&udc->ep0_wq);
2018 }
2019 spin_unlock(&udc->lock);
2020
2021 if (disconnected && udc->driver)
2022 udc->driver->disconnect(&udc->gadget);
2023 else if (bus_reset && udc->driver)
2024 usb_gadget_udc_reset(&udc->gadget, udc->driver);
2025
2026 return IRQ_HANDLED;
2027 }
2028
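/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that raised the interrupt.
 *
 * Reaps completed buffer descriptors, advances or completes the current
 * request, and starts the next queued request if there is one.
 */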
2039 static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2040 {
2041 struct iudma_ch *iudma = dev_id;
2042 struct bcm63xx_udc *udc = iudma->udc;
2043 struct bcm63xx_ep *bep;
2044 struct usb_request *req = NULL;
2045 struct bcm63xx_req *breq = NULL;
2046 int rc;
2047 bool is_done = false;
2048
2049 spin_lock(&udc->lock);
2050
2051 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2052 ENETDMAC_IR_REG, iudma->ch_idx);
2053 bep = iudma->bep;
2054 rc = iudma_read(udc, iudma);
2055
/* ep0 RX/TX channels: the request comes from the ep0 state machine */
2057 if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2058 iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2059 req = udc->ep0_request;
2060 breq = our_req(req);
2061
/* a single request can span multiple IUDMA submissions */
2063 if (rc >= 0) {
2064 req->actual += rc;
2065
2066 if (req->actual >= req->length || breq->bd_bytes > rc) {
2067 udc->ep0_req_completed = 1;
2068 is_done = true;
2069 schedule_work(&udc->ep0_wq);
2070
2071
2072 req->actual = min(req->actual, req->length);
2073 } else {
2074
2075 iudma_write(udc, iudma, breq);
2076 }
2077 }
2078 } else if (!list_empty(&bep->queue)) {
2079 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2080 req = &breq->req;
2081
2082 if (rc >= 0) {
2083 req->actual += rc;
2084
2085 if (req->actual >= req->length || breq->bd_bytes > rc) {
2086 is_done = true;
2087 list_del(&breq->queue);
2088
2089 req->actual = min(req->actual, req->length);
2090
2091 if (!list_empty(&bep->queue)) {
2092 struct bcm63xx_req *next;
2093
2094 next = list_first_entry(&bep->queue,
2095 struct bcm63xx_req, queue);
2096 iudma_write(udc, iudma, next);
2097 }
2098 } else {
2099 iudma_write(udc, iudma, breq);
2100 }
2101 }
2102 }
2103 spin_unlock(&udc->lock);
2104
2105 if (is_done) {
2106 usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2107 if (req->complete)
2108 req->complete(&bep->ep, req);
2109 }
2110
2111 return IRQ_HANDLED;
2112 }
2113
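/***********************************************************************
 * Debugfs filesystem
 ***********************************************************************/

/*
 * bcm63xx_usbd_dbg_show - Show USBD controller state in debugfs.
 */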
2125 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2126 {
2127 struct bcm63xx_udc *udc = s->private;
2128
2129 if (!udc->driver)
2130 return -ENODEV;
2131
2132 seq_printf(s, "ep0 state: %s\n",
2133 bcm63xx_ep0_state_names[udc->ep0state]);
2134 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2135 udc->ep0_req_reset ? "reset " : "",
2136 udc->ep0_req_set_cfg ? "set_cfg " : "",
2137 udc->ep0_req_set_iface ? "set_iface " : "",
2138 udc->ep0_req_shutdown ? "shutdown " : "",
2139 udc->ep0_request ? "pending " : "",
2140 udc->ep0_req_completed ? "completed " : "",
2141 udc->ep0_reply ? "reply " : "");
2142 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2143 udc->cfg, udc->iface, udc->alt_iface);
2144 seq_printf(s, "regs:\n");
2145 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2146 usbd_readl(udc, USBD_CONTROL_REG),
2147 usbd_readl(udc, USBD_STRAPS_REG),
2148 usbd_readl(udc, USBD_STATUS_REG));
2149 seq_printf(s, " events: %08x; stall: %08x\n",
2150 usbd_readl(udc, USBD_EVENTS_REG),
2151 usbd_readl(udc, USBD_STALL_REG));
2152
2153 return 0;
2154 }
2155 DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
2156
2164 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2165 {
2166 struct bcm63xx_udc *udc = s->private;
2167 int ch_idx, i;
2168 u32 sram2, sram3;
2169
2170 if (!udc->driver)
2171 return -ENODEV;
2172
2173 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2174 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2175 struct list_head *pos;
2176
2177 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2178 switch (iudma_defaults[ch_idx].ep_type) {
2179 case BCMEP_CTRL:
2180 seq_printf(s, "control");
2181 break;
2182 case BCMEP_BULK:
2183 seq_printf(s, "bulk");
2184 break;
2185 case BCMEP_INTR:
2186 seq_printf(s, "interrupt");
2187 break;
2188 }
2189 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2190 seq_printf(s, " [ep%d]:\n",
2191 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2192 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2193 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2194 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2195 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2196 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2197
2198 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2199 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2200 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2201 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2202 sram2 >> 16, sram2 & 0xffff,
2203 sram3 >> 16, sram3 & 0xffff,
2204 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2205 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2206 iudma->n_bds);
2207
2208 if (iudma->bep) {
2209 i = 0;
2210 list_for_each(pos, &iudma->bep->queue)
2211 i++;
2212 seq_printf(s, "; %d queued\n", i);
2213 } else {
2214 seq_printf(s, "\n");
2215 }
2216
2217 for (i = 0; i < iudma->n_bds; i++) {
2218 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2219
2220 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2221 i * sizeof(*d), i,
2222 d->len_stat >> 16, d->len_stat & 0xffff,
2223 d->address);
2224 if (d == iudma->read_bd)
2225 seq_printf(s, " <<RD");
2226 if (d == iudma->write_bd)
2227 seq_printf(s, " <<WR");
2228 seq_printf(s, "\n");
2229 }
2230
2231 seq_printf(s, "\n");
2232 }
2233
2234 return 0;
2235 }
2236 DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
2237
2242 static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2243 {
2244 struct dentry *root;
2245
2246 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2247 return;
2248
2249 root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
2250 debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
2251 debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
2252 }
2253
2260 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2261 {
2262 debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
2263 }
2264
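/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required; this driver does not probe from
 * device tree.
 */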
2276 static int bcm63xx_udc_probe(struct platform_device *pdev)
2277 {
2278 struct device *dev = &pdev->dev;
2279 struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2280 struct bcm63xx_udc *udc;
2281 int rc = -ENOMEM, i, irq;
2282
2283 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2284 if (!udc)
2285 return -ENOMEM;
2286
2287 platform_set_drvdata(pdev, udc);
2288 udc->dev = dev;
2289 udc->pd = pd;
2290
2291 if (!pd) {
2292 dev_err(dev, "missing platform data\n");
2293 return -EINVAL;
2294 }
2295
2296 udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
2297 if (IS_ERR(udc->usbd_regs))
2298 return PTR_ERR(udc->usbd_regs);
2299
2300 udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
2301 if (IS_ERR(udc->iudma_regs))
2302 return PTR_ERR(udc->iudma_regs);
2303
2304 spin_lock_init(&udc->lock);
2305 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2306
2307 udc->gadget.ops = &bcm63xx_udc_ops;
2308 udc->gadget.name = dev_name(dev);
2309
2310 if (!pd->use_fullspeed && !use_fullspeed)
2311 udc->gadget.max_speed = USB_SPEED_HIGH;
2312 else
2313 udc->gadget.max_speed = USB_SPEED_FULL;
2314
/* claim clocks, allocate the ep0 buffer and BD rings, mask IRQs */
2316 rc = bcm63xx_init_udc_hw(udc);
2317 if (rc)
2318 return rc;
2319
2320 rc = -ENXIO;
2321
/* IRQ resource #0: USBD control/event interrupt */
2323 irq = platform_get_irq(pdev, 0);
2324 if (irq < 0) {
2325 rc = irq;
2326 goto out_uninit;
2327 }
2328 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2329 dev_name(dev), udc) < 0)
2330 goto report_request_failure;
2331
/* remaining IRQ resources: one data interrupt per IUDMA channel */
2333 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2334 irq = platform_get_irq(pdev, i + 1);
2335 if (irq < 0) {
2336 rc = irq;
2337 goto out_uninit;
2338 }
2339 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2340 dev_name(dev), &udc->iudma[i]) < 0)
2341 goto report_request_failure;
2342 }
2343
2344 bcm63xx_udc_init_debugfs(udc);
2345 rc = usb_add_gadget_udc(dev, &udc->gadget);
2346 if (!rc)
2347 return 0;
2348
2349 bcm63xx_udc_cleanup_debugfs(udc);
2350 out_uninit:
2351 bcm63xx_uninit_udc_hw(udc);
2352 return rc;
2353
2354 report_request_failure:
2355 dev_err(dev, "error requesting IRQ #%d\n", irq);
2356 goto out_uninit;
2357 }
2358
2363 static int bcm63xx_udc_remove(struct platform_device *pdev)
2364 {
2365 struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2366
2367 bcm63xx_udc_cleanup_debugfs(udc);
2368 usb_del_gadget_udc(&udc->gadget);
2369 BUG_ON(udc->driver);
2370
2371 bcm63xx_uninit_udc_hw(udc);
2372
2373 return 0;
2374 }
2375
2376 static struct platform_driver bcm63xx_udc_driver = {
2377 .probe = bcm63xx_udc_probe,
2378 .remove = bcm63xx_udc_remove,
2379 .driver = {
2380 .name = DRV_MODULE_NAME,
2381 },
2382 };
2383 module_platform_driver(bcm63xx_udc_driver);
2384
2385 MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2386 MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2387 MODULE_LICENSE("GPL");
2388 MODULE_ALIAS("platform:" DRV_MODULE_NAME);