0001
0002
0003
0004
0005 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0006 #include <linux/kernel.h>
0007 #include <linux/module.h>
0008 #include <linux/pci.h>
0009 #include <linux/delay.h>
0010 #include <linux/errno.h>
0011 #include <linux/gpio/consumer.h>
0012 #include <linux/gpio/machine.h>
0013 #include <linux/list.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/usb/ch9.h>
0016 #include <linux/usb/gadget.h>
0017 #include <linux/irq.h>
0018
0019 #define PCH_VBUS_PERIOD 3000
0020 #define PCH_VBUS_INTERVAL 10
0021
0022
0023 #define UDC_EP_REG_SHIFT 0x20
0024
0025 #define UDC_EPCTL_ADDR 0x00
0026 #define UDC_EPSTS_ADDR 0x04
0027 #define UDC_BUFIN_FRAMENUM_ADDR 0x08
0028 #define UDC_BUFOUT_MAXPKT_ADDR 0x0C
0029 #define UDC_SUBPTR_ADDR 0x10
0030 #define UDC_DESPTR_ADDR 0x14
0031 #define UDC_CONFIRM_ADDR 0x18
0032
0033 #define UDC_DEVCFG_ADDR 0x400
0034 #define UDC_DEVCTL_ADDR 0x404
0035 #define UDC_DEVSTS_ADDR 0x408
0036 #define UDC_DEVIRQSTS_ADDR 0x40C
0037 #define UDC_DEVIRQMSK_ADDR 0x410
0038 #define UDC_EPIRQSTS_ADDR 0x414
0039 #define UDC_EPIRQMSK_ADDR 0x418
0040 #define UDC_DEVLPM_ADDR 0x41C
0041 #define UDC_CSR_BUSY_ADDR 0x4f0
0042 #define UDC_SRST_ADDR 0x4fc
0043 #define UDC_CSR_ADDR 0x500
0044
0045
0046
0047 #define UDC_EPCTL_MRXFLUSH (1 << 12)
0048 #define UDC_EPCTL_RRDY (1 << 9)
0049 #define UDC_EPCTL_CNAK (1 << 8)
0050 #define UDC_EPCTL_SNAK (1 << 7)
0051 #define UDC_EPCTL_NAK (1 << 6)
0052 #define UDC_EPCTL_P (1 << 3)
0053 #define UDC_EPCTL_F (1 << 1)
0054 #define UDC_EPCTL_S (1 << 0)
0055 #define UDC_EPCTL_ET_SHIFT 4
0056
0057 #define UDC_EPCTL_ET_MASK 0x00000030
0058
0059 #define UDC_EPCTL_ET_CONTROL 0
0060 #define UDC_EPCTL_ET_ISO 1
0061 #define UDC_EPCTL_ET_BULK 2
0062 #define UDC_EPCTL_ET_INTERRUPT 3
0063
0064
0065
0066 #define UDC_EPSTS_XFERDONE (1 << 27)
0067 #define UDC_EPSTS_RSS (1 << 26)
0068 #define UDC_EPSTS_RCS (1 << 25)
0069 #define UDC_EPSTS_TXEMPTY (1 << 24)
0070 #define UDC_EPSTS_TDC (1 << 10)
0071 #define UDC_EPSTS_HE (1 << 9)
0072 #define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
0073 #define UDC_EPSTS_BNA (1 << 7)
0074 #define UDC_EPSTS_IN (1 << 6)
0075 #define UDC_EPSTS_OUT_SHIFT 4
0076
0077 #define UDC_EPSTS_OUT_MASK 0x00000030
0078 #define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
0079
0080 #define UDC_EPSTS_OUT_SETUP 2
0081 #define UDC_EPSTS_OUT_DATA 1
0082
0083
0084
0085 #define UDC_DEVCFG_CSR_PRG (1 << 17)
0086 #define UDC_DEVCFG_SP (1 << 3)
0087
0088 #define UDC_DEVCFG_SPD_HS 0x0
0089 #define UDC_DEVCFG_SPD_FS 0x1
0090 #define UDC_DEVCFG_SPD_LS 0x2
0091
0092
0093
0094 #define UDC_DEVCTL_THLEN_SHIFT 24
0095 #define UDC_DEVCTL_BRLEN_SHIFT 16
0096 #define UDC_DEVCTL_CSR_DONE (1 << 13)
0097 #define UDC_DEVCTL_SD (1 << 10)
0098 #define UDC_DEVCTL_MODE (1 << 9)
0099 #define UDC_DEVCTL_BREN (1 << 8)
0100 #define UDC_DEVCTL_THE (1 << 7)
0101 #define UDC_DEVCTL_DU (1 << 4)
0102 #define UDC_DEVCTL_TDE (1 << 3)
0103 #define UDC_DEVCTL_RDE (1 << 2)
0104 #define UDC_DEVCTL_RES (1 << 0)
0105
0106
0107
0108 #define UDC_DEVSTS_TS_SHIFT 18
0109 #define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
0110 #define UDC_DEVSTS_ALT_SHIFT 8
0111 #define UDC_DEVSTS_INTF_SHIFT 4
0112 #define UDC_DEVSTS_CFG_SHIFT 0
0113
0114 #define UDC_DEVSTS_TS_MASK 0xfffc0000
0115 #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
0116 #define UDC_DEVSTS_ALT_MASK 0x00000f00
0117 #define UDC_DEVSTS_INTF_MASK 0x000000f0
0118 #define UDC_DEVSTS_CFG_MASK 0x0000000f
0119
0120 #define UDC_DEVSTS_ENUM_SPEED_FULL 1
0121 #define UDC_DEVSTS_ENUM_SPEED_HIGH 0
0122 #define UDC_DEVSTS_ENUM_SPEED_LOW 2
0123 #define UDC_DEVSTS_ENUM_SPEED_FULLX 3
0124
0125
0126
0127 #define UDC_DEVINT_RWKP (1 << 7)
0128 #define UDC_DEVINT_ENUM (1 << 6)
0129 #define UDC_DEVINT_SOF (1 << 5)
0130 #define UDC_DEVINT_US (1 << 4)
0131 #define UDC_DEVINT_UR (1 << 3)
0132 #define UDC_DEVINT_ES (1 << 2)
0133 #define UDC_DEVINT_SI (1 << 1)
0134 #define UDC_DEVINT_SC (1 << 0)
0135
0136 #define UDC_DEVINT_MSK 0x7f
0137
0138
0139
0140 #define UDC_EPINT_IN_SHIFT 0
0141 #define UDC_EPINT_OUT_SHIFT 16
0142 #define UDC_EPINT_IN_EP0 (1 << 0)
0143 #define UDC_EPINT_OUT_EP0 (1 << 16)
0144
0145 #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
0146
0147
0148
0149 #define UDC_CSR_BUSY (1 << 0)
0150
0151
0152
0153 #define UDC_PSRST (1 << 1)
0154 #define UDC_SRST (1 << 0)
0155
0156
0157
0158 #define UDC_CSR_NE_NUM_SHIFT 0
0159 #define UDC_CSR_NE_DIR_SHIFT 4
0160 #define UDC_CSR_NE_TYPE_SHIFT 5
0161 #define UDC_CSR_NE_CFG_SHIFT 7
0162 #define UDC_CSR_NE_INTF_SHIFT 11
0163 #define UDC_CSR_NE_ALT_SHIFT 15
0164 #define UDC_CSR_NE_MAX_PKT_SHIFT 19
0165
0166 #define UDC_CSR_NE_NUM_MASK 0x0000000f
0167 #define UDC_CSR_NE_DIR_MASK 0x00000010
0168 #define UDC_CSR_NE_TYPE_MASK 0x00000060
0169 #define UDC_CSR_NE_CFG_MASK 0x00000780
0170 #define UDC_CSR_NE_INTF_MASK 0x00007800
0171 #define UDC_CSR_NE_ALT_MASK 0x00078000
0172 #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
0173
0174 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
0175 #define PCH_UDC_EPINT(in, num)\
0176 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
0177
0178
0179 #define UDC_EP0IN_IDX 0
0180 #define UDC_EP0OUT_IDX 1
0181 #define UDC_EPIN_IDX(ep) (ep * 2)
0182 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
0183 #define PCH_UDC_EP0 0
0184 #define PCH_UDC_EP1 1
0185 #define PCH_UDC_EP2 2
0186 #define PCH_UDC_EP3 3
0187
0188
0189 #define PCH_UDC_EP_NUM 32
0190 #define PCH_UDC_USED_EP_NUM 4
0191
0192 #define PCH_UDC_BRLEN 0x0F
0193 #define PCH_UDC_THLEN 0x1F
0194
0195 #define UDC_EP0IN_BUFF_SIZE 16
0196 #define UDC_EPIN_BUFF_SIZE 256
0197 #define UDC_EP0OUT_BUFF_SIZE 16
0198 #define UDC_EPOUT_BUFF_SIZE 256
0199
0200 #define UDC_EP0IN_MAX_PKT_SIZE 64
0201 #define UDC_EP0OUT_MAX_PKT_SIZE 64
0202 #define UDC_BULK_MAX_PKT_SIZE 512
0203
0204
0205 #define DMA_DIR_RX 1
0206 #define DMA_DIR_TX 2
0207 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
0208 #define UDC_DMA_MAXPACKET 65536
0209
0210
0211
0212
0213
0214
0215
0216
0217
/**
 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
 *				  for data
 * @status:	Status quadlet (PCH_UDC_BS_*, PCH_UDC_RTS_* and byte-count bits)
 * @reserved:	Reserved, hardware defined
 * @dataptr:	Buffer descriptor (32-bit DMA address of the data buffer)
 * @next:	Next descriptor (32-bit DMA address; chains descriptors)
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
0224
0225
0226
0227
0228
0229
0230
0231
0232 struct pch_udc_stp_dma_desc {
0233 u32 status;
0234 u32 reserved;
0235 struct usb_ctrlrequest request;
0236 } __attribute((packed));
0237
0238
0239
0240 #define PCH_UDC_BUFF_STS 0xC0000000
0241 #define PCH_UDC_BS_HST_RDY 0x00000000
0242 #define PCH_UDC_BS_DMA_BSY 0x40000000
0243 #define PCH_UDC_BS_DMA_DONE 0x80000000
0244 #define PCH_UDC_BS_HST_BSY 0xC0000000
0245
0246 #define PCH_UDC_RXTX_STS 0x30000000
0247 #define PCH_UDC_RTS_SUCC 0x00000000
0248 #define PCH_UDC_RTS_DESERR 0x10000000
0249 #define PCH_UDC_RTS_BUFERR 0x30000000
0250
0251 #define PCH_UDC_DMA_LAST 0x08000000
0252
0253 #define PCH_UDC_RXTX_BYTES 0x0000ffff
0254
0255
0256
0257
0258
0259
0260
0261
/**
 * struct pch_udc_cfg_data - Structure to hold current configuration,
 *			     interface and alternate-setting values
 * @cur_cfg:	current configuration in use
 * @cur_intf:	current interface in use
 * @cur_alt:	current alt interface in use
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
/**
 * struct pch_udc_ep - Structure holding a PCH USB device endpoint
 * @ep:			embedded ep struct
 * @td_stp_phys:	for setup request (DMA address of @td_stp)
 * @td_data_phys:	for data request (DMA address of @td_data)
 * @td_stp:		for setup request (virtual address of SETUP descriptor)
 * @td_data:		for data request (virtual address of data descriptor)
 * @dev:		reference to device struct
 * @offset_addr:	offset address of ep register set from the device base
 * @queue:		queue for requests
 * @num:		endpoint number
 * @in:			endpoint is IN
 * @halted:		endpoint halted?
 * @epsts:		Endpoint status as last read from UDC_EPSTS
 */
struct pch_udc_ep {
	struct usb_ep ep;
	dma_addr_t td_stp_phys;
	dma_addr_t td_data_phys;
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_dev *dev;
	unsigned long offset_addr;
	struct list_head queue;
	unsigned num:5,
		in:1,
		halted:1;
	unsigned long epsts;
};
0298
0299
0300
0301
0302
0303
0304
0305
0306
/**
 * struct pch_vbus_gpio_data - Structure holding GPIO information
 *			       for detecting VBUS
 * @port:		GPIO descriptor for VBUS detection (NULL if not used)
 * @intr:		IRQ number assigned to the GPIO (0 if polling only)
 * @irq_work_fall:	Structure for WorkQueue, scheduled on VBUS falling edge
 * @irq_work_rise:	Structure for WorkQueue, scheduled on VBUS rising edge
 */
struct pch_vbus_gpio_data {
	struct gpio_desc *port;
	int intr;
	struct work_struct irq_work_fall;
	struct work_struct irq_work_rise;
};
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
/**
 * struct pch_udc_dev - Structure holding complete information
 *			of the PCH USB device
 * @gadget:		gadget driver data
 * @driver:		reference to gadget driver bound
 * @pdev:		reference to the PCI device
 * @ep:			array of endpoints
 * @lock:		protects all state
 * @stall:		stall requested
 * @prot_stall:		protocol stall requested
 * @suspended:		driver is suspended
 * @connected:		gadget driver associated
 * @vbus_session:	required vbus_session state
 * @set_cfg_not_acked:	pending acknowledgement 4 setup
 * @waiting_zlp_ack:	pending acknowledgement 4 ZLP
 * @data_requests:	DMA pool for data requests
 * @stp_requests:	DMA pool for setup requests
 * @dma_addr:		DMA address of the mapped setup buffer
 * @setup_data:		Received setup data
 * @base_addr:		for mapped device memory
 * @bar:		PCI BAR used for mapped device memory
 * @cfg_data:		current cfg, intf, and alt in use
 * @vbus_gpio:		GPIO informaton for detecting VBUS
 */
struct pch_udc_dev {
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;
	struct pci_dev			*pdev;
	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
	spinlock_t			lock;
	unsigned
			stall:1,
			prot_stall:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct dma_pool		*data_requests;
	struct dma_pool		*stp_requests;
	dma_addr_t			dma_addr;
	struct usb_ctrlrequest		setup_data;
	void __iomem			*base_addr;
	unsigned short			bar;
	struct pch_udc_cfg_data		cfg_data;
	struct pch_vbus_gpio_data	vbus_gpio;
};
0362 #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
0363
0364 #define PCH_UDC_PCI_BAR_QUARK_X1000 0
0365 #define PCH_UDC_PCI_BAR 1
0366
0367 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
0368 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
0369
0370 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
0371 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
0372
0373 static const char ep0_string[] = "ep0in";
0374 static DEFINE_SPINLOCK(udc_stall_spinlock);
0375 static bool speed_fs;
0376 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
0377 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
/**
 * struct pch_udc_request - Structure holding a PCH USB device request packet
 * @req:		embedded ep request
 * @td_data_phys:	DMA address of the first data descriptor of the chain
 * @td_data:		first dma desc. of chain (virtual address)
 * @td_data_last:	last dma desc. of chain
 * @queue:		associated queue (links into pch_udc_ep.queue)
 * @dma_going:		DMA in progress for request
 * @dma_done:		DMA completed for request
 * @chain_len:		chain length (number of descriptors in the chain)
 */
struct pch_udc_request {
	struct usb_request		req;
	dma_addr_t			td_data_phys;
	struct pch_udc_data_dma_desc	*td_data;
	struct pch_udc_data_dma_desc	*td_data_last;
	struct list_head		queue;
	unsigned			dma_going:1,
					dma_done:1;
	unsigned			chain_len;
};
0400
/* Read a 32-bit device register at @reg offset from the mapped base. */
static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}
0405
/* Write @val to a 32-bit device register at @reg offset from the mapped base. */
static inline void pch_udc_writel(struct pch_udc_dev *dev,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}
0411
/* Read-modify-write: set @bitmask bits in the device register @reg. */
static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}
0418
/* Read-modify-write: clear @bitmask bits in the device register @reg. */
static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}
0425
/* Read a 32-bit endpoint register at @reg offset from this ep's register set. */
static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}
0430
/* Write @val to a 32-bit endpoint register at @reg offset from this ep's set. */
static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				    unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}
0436
/* Read-modify-write: set @bitmask bits in the endpoint register @reg. */
static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}
0443
/* Read-modify-write: clear @bitmask bits in the endpoint register @reg. */
static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				     unsigned long reg,
				     unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}
0450
0451
0452
0453
0454
/**
 * pch_udc_csr_busy() - Wait till idle.
 * @dev:	Reference to pch_udc_dev structure
 *
 * Polls the CSR busy flag up to 200 times; logs an error on timeout
 * rather than failing the caller.
 */
static void pch_udc_csr_busy(struct pch_udc_dev *dev)
{
	unsigned int count = 200;

	/* Wait till idle */
	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
		&& --count)
		cpu_relax();
	if (!count)
		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
}
0466
0467
0468
0469
0470
0471
0472
/**
 * pch_udc_write_csr() - Write the command and status registers.
 * @dev:	Reference to pch_udc_dev structure
 * @val:	value to be written to CSR register
 * @ep:		end-point index (selects which CSR slot to write)
 *
 * Brackets the write with busy-waits so the CSR engine has accepted it.
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			       unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}
0482
0483
0484
0485
0486
0487
0488
0489
/**
 * pch_udc_read_csr() - Read the command and status registers.
 * @dev:	Reference to pch_udc_dev structure
 * @ep:		end-point index
 *
 * Return: content of CSR register for @ep
 *
 * The first read is a deliberate dummy access; only the second read's
 * value is returned.
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
0499
0500
0501
0502
0503
/**
 * pch_udc_rmt_wakeup() - Initiate for remote wakeup
 * @dev:	Reference to pch_udc_dev structure
 *
 * Pulses the RES (resume) bit in DEVCTL for ~1 ms.
 */
static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	mdelay(1);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
0510
0511
0512
0513
0514
0515
/**
 * pch_udc_get_frame() - Get the current frame from device status register
 * @dev:	Reference to pch_udc_dev structure
 *
 * Return: current frame number extracted from the DEVSTS timestamp field
 */
static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
{
	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
}
0521
0522
0523
0524
0525
/**
 * pch_udc_clear_selfpowered() - Clear the self power control
 * @dev:	Reference to pch_udc_dev structure
 */
static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
0530
0531
0532
0533
0534
/**
 * pch_udc_set_selfpowered() - Set the self power control
 * @dev:	Reference to pch_udc_dev structure
 */
static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
0539
0540
0541
0542
0543
/**
 * pch_udc_set_disconnect() - Set the disconnect status (soft disconnect)
 * @dev:	Reference to pch_udc_dev structure
 */
static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
}
0548
0549
0550
0551
0552
/**
 * pch_udc_clear_disconnect() - Clear the disconnect status.
 * @dev:	Reference to pch_udc_dev structure
 *
 * Briefly drives resume (RES) while dropping soft-disconnect so the host
 * re-detects the device.
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Connect the device */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
0562
0563 static void pch_udc_init(struct pch_udc_dev *dev);
0564
0565
0566
0567
0568
0569
/**
 * pch_udc_reconnect() - This API initializes usb device controller,
 *			 and clear the disconnect status.
 * @dev:	Reference to pch_udc_dev structure
 */
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);

	/* enable reset and enumeration done interrupts, so the gadget can
	 * be re-enumerated after the soft reset above
	 */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);

	/* Clear the disconnect */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
0586
0587
0588
0589
0590
0591
0592
0593
/**
 * pch_udc_vbus_session() - set or clearr the VBUS status
 * @dev:	Reference to pch_udc_dev structure
 * @is_active:	VBUS status: 1 = higher than 4.0 V (connect), 0 = lower
 *
 * Runs under dev->lock; the lock is dropped around the gadget driver's
 * ->disconnect() callback, which must not be called with it held.
 */
static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
					  int is_active)
{
	unsigned long		iflags;

	spin_lock_irqsave(&dev->lock, iflags);
	if (is_active) {
		pch_udc_reconnect(dev);
		dev->vbus_session = 1;
	} else {
		if (dev->driver && dev->driver->disconnect) {
			spin_unlock_irqrestore(&dev->lock, iflags);
			dev->driver->disconnect(&dev->gadget);
			spin_lock_irqsave(&dev->lock, iflags);
		}
		pch_udc_set_disconnect(dev);
		dev->vbus_session = 0;
	}
	spin_unlock_irqrestore(&dev->lock, iflags);
}
0614
0615
0616
0617
0618
/**
 * pch_udc_ep_set_stall() - Set the stall of endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 *
 * IN endpoints get their TX FIFO flushed (F bit) before the stall bit is set.
 */
static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
{
	if (ep->in) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	} else {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	}
}
0628
0629
0630
0631
0632
/**
 * pch_udc_ep_clear_stall() - Clear the stall of endpoint
 * @ep:	Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
{
	/* Clear the stall */
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	/* Clear NAK by writing CNAK */
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
}
0640
0641
0642
0643
0644
0645
/**
 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 * @type:	Type of endpoint (UDC_EPCTL_ET_*)
 *
 * Note: full register write, not read-modify-write — clears other EPCTL bits.
 */
static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
					u8 type)
{
	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
}
0652
0653
0654
0655
0656
0657
0658
/**
 * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 * @buf_size:	The buffer word size
 * @ep_in:	direction of endpoint: nonzero = IN, 0 = OUT
 *
 * IN buffer size lives in the low half of BUFIN_FRAMENUM; OUT buffer size
 * lives in the high half of BUFOUT_MAXPKT.
 */
static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
						 u32 buf_size, u32 ep_in)
{
	u32 data;
	if (ep_in) {
		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
		data = (data & 0xffff0000) | (buf_size & 0xffff);
		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
	} else {
		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
		data = (buf_size << 16) | (data & 0xffff);
		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
	}
}
0673
0674
0675
0676
0677
0678
/**
 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 * @pkt_size:	The packet byte size
 *
 * Max packet size lives in the low half of BUFOUT_MAXPKT for both directions.
 */
static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
{
	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
	data = (data & 0xffff0000) | (pkt_size & 0xffff);
	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
}
0685
0686
0687
0688
0689
0690
/**
 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 * @addr:	DMA address of the SETUP descriptor
 */
static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
}
0695
0696
0697
0698
0699
0700
/**
 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 * @addr:	DMA address of the data descriptor
 */
static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
}
0705
0706
0707
0708
0709
/**
 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
}
0714
0715
0716
0717
0718
/**
 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
0723
0724
0725
0726
0727
/**
 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
 * @ep:		Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
0742 {
0743 if (dir == DMA_DIR_RX)
0744 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
0745 else if (dir == DMA_DIR_TX)
0746 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
0747 }
0748
0749
0750
0751
0752
0753
0754
0755
0756
0757 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
0758 {
0759 if (dir == DMA_DIR_RX)
0760 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
0761 else if (dir == DMA_DIR_TX)
0762 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
0763 }
0764
0765
0766
0767
0768
0769
/**
 * pch_udc_set_csr_done() - Set the device control register
 *			    CSR done field (bit 13)
 * @dev:	reference to structure of type pch_udc_regs
 */
static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
}
0774
0775
0776
0777
0778
0779
/**
 * pch_udc_disable_interrupts() - Disables the specified interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to disable interrupts (setting a mask bit masks the IRQ)
 */
static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
					    u32 mask)
{
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
}
0785
0786
0787
0788
0789
0790
/**
 * pch_udc_enable_interrupts() - Enable the specified interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to enable interrupts (clearing a mask bit unmasks the IRQ)
 */
static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
					   u32 mask)
{
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
}
0796
0797
0798
0799
0800
0801
/**
 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to disable interrupts
 */
static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
						u32 mask)
{
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
}
0807
0808
0809
0810
0811
0812
/**
 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 * @mask:	Mask to enable interrupts
 */
static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
					      u32 mask)
{
	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
}
0818
0819
0820
0821
0822
0823
/**
 * pch_udc_read_device_interrupts() - Read the device interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 *
 * Return: The device interrupt status register contents
 */
static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
}
0828
0829
0830
0831
0832
0833
/**
 * pch_udc_write_device_interrupts() - Write device interrupts
 *				       (write-1-to-clear status bits)
 * @dev:	Reference to structure of type pch_udc_regs
 * @val:	The value to be written to interrupt register
 */
static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
						     u32 val)
{
	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
}
0839
0840
0841
0842
0843
0844
/**
 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
 * @dev:	Reference to structure of type pch_udc_regs
 *
 * Return: The endpoint interrupt status register contents
 */
static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
}
0849
0850
0851
0852
0853
0854
/**
 * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
 *				   (write-1-to-clear status bits)
 * @dev:	Reference to structure of type pch_udc_regs
 * @val:	The value to be written to interrupt register
 */
static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
					     u32 val)
{
	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
}
0860
0861
0862
0863
0864
0865
/**
 * pch_udc_read_device_status() - Read the device status
 * @dev:	Reference to structure of type pch_udc_regs
 *
 * Return: The device status register contents
 */
static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
}
0870
0871
0872
0873
0874
0875
/**
 * pch_udc_read_ep_control() - Read the endpoint control
 * @ep:		Reference to structure of type pch_udc_ep_regs
 *
 * Return: The endpoint control register contents
 */
static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
}
0880
0881
0882
0883
0884
0885
0886 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
0887 {
0888 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
0889 }
0890
0891
0892
0893
0894
0895
/**
 * pch_udc_read_ep_status() - Read the endpoint status
 * @ep:		Reference to structure of type pch_udc_ep_regs
 *
 * Return: The endpoint status register contents
 */
static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
}
0900
0901
0902
0903
0904
0905
0906 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
0907 u32 stat)
0908 {
0909 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
0910 }
0911
0912
0913
0914
0915
0916
/**
 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
 *			  of the endpoint control register
 * @ep:		Reference to structure of type pch_udc_ep_regs
 */
static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
}
0921
0922
0923
0924
0925
0926
/**
 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
 *			    of the endpoint control register
 * @ep:		reference to structure of type pch_udc_ep_regs
 *
 * Returns early if the NAK bit is not currently set. For OUT endpoints,
 * first waits for the RX FIFO to drain; then repeatedly writes CNAK until
 * the hardware actually drops NAK (bounded retry loops with error logging
 * on timeout).
 */
static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
		return;
	if (!ep->in) {
		loopcnt = 10000;
		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
			--loopcnt)
			udelay(5);
		if (!loopcnt)
			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
				__func__);
	}
	loopcnt = 10000;
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
		udelay(5);
	}
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
			__func__, ep->num, (ep->in ? "in" : "out"));
}
0952
0953
0954
0955
0956
0957
0958
0959
0960 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
0961 {
0962 if (dir) {
0963 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
0964 return;
0965 }
0966 }
0967
0968
0969
0970
0971
0972
0973
/**
 * pch_udc_ep_enable() - This api enables endpoint
 * @ep:		reference to structure of type pch_udc_ep_regs
 * @cfg:	current configuration/interface/alt-setting information
 * @desc:	endpoint descriptor
 *
 * Programs transfer type, buffer size, max packet size, sets NAK, flushes
 * the FIFO, then writes the composed NE (endpoint) value into the CSR slot
 * for this endpoint (separate slots for IN and OUT).
 */
static void pch_udc_ep_enable(struct pch_udc_ep *ep,
			       struct pch_udc_cfg_data *cfg,
			       const struct usb_endpoint_descriptor *desc)
{
	u32 val = 0;
	u32 buff_size = 0;

	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
	if (ep->in)
		buff_size = UDC_EPIN_BUFF_SIZE;
	else
		buff_size = UDC_EPOUT_BUFF_SIZE;
	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
	pch_udc_ep_set_nak(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	/* Configure the endpoint */
	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
		UDC_CSR_NE_TYPE_SHIFT) |
	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;

	if (ep->in)
		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
	else
		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
}
1004
1005
1006
1007
1008
/**
 * pch_udc_ep_disable() - This api disables endpoint
 * @ep:		reference to structure of type pch_udc_ep_regs
 *
 * IN: flush the TX FIFO, set NAK, clear the IN status bit.
 * OUT: set NAK only. Finally the descriptor pointer is cleared.
 */
static void pch_udc_ep_disable(struct pch_udc_ep *ep)
{
	if (ep->in) {
		/* flush the fifo */
		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
	} else {
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
	}
	/* reset desc pointer */
	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
}
1024
1025
1026
1027
1028
/**
 * pch_udc_wait_ep_stall() - Wait EP stall.
 * @ep:		reference to structure of type pch_udc_ep_regs
 *
 * Polls until the stall (S) bit clears, bounded at 10000 * 5 us; logs an
 * error on timeout.
 */
static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
{
	unsigned int count = 10000;

	/* Wait till idle */
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
		udelay(5);
	if (!count)
		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
}
1039
1040
1041
1042
1043
/**
 * pch_udc_init() - This API initializes usb device controller
 * @dev:	Reference to pch_udc_dev structure
 *
 * Soft-resets the controller, masks and clears all device and endpoint
 * interrupts, then programs DEVCFG (speed per the speed_fs module param)
 * and DEVCTL (burst/threshold lengths, device mode, burst enable,
 * threshold enable).
 */
static void pch_udc_init(struct pch_udc_dev *dev)
{
	if (NULL == dev) {
		pr_err("%s: Invalid address\n", __func__);
		return;
	}
	/* Soft Reset and Reset PHY */
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
	mdelay(1);
	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
	mdelay(1);
	/* mask and clear all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);

	/* mask and clear all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);

	/* enable dynamic CSR programmingi, self powered and device speed */
	if (speed_fs)
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
	else /* defaul high speed */
		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
			UDC_DEVCTL_THE);
}
1078
1079
1080
1081
1082
/**
 * pch_udc_exit() - This API exit usb device controller
 * @dev:	Reference to pch_udc_dev structure
 *
 * Masks all device and endpoint interrupts and soft-disconnects from the bus.
 */
static void pch_udc_exit(struct pch_udc_dev *dev)
{
	/* mask all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	/* mask all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	/* put device in disconnected state */
	pch_udc_set_disconnect(dev);
}
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1102 {
1103 struct pch_udc_dev *dev;
1104
1105 if (!gadget)
1106 return -EINVAL;
1107 dev = container_of(gadget, struct pch_udc_dev, gadget);
1108 return pch_udc_get_frame(dev);
1109 }
1110
1111
1112
1113
1114
1115
1116
1117
1118
/**
 * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
 * @gadget:	Reference to the gadget driver
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	If the gadget passed is NULL
 */
static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
{
	struct pch_udc_dev	*dev;
	unsigned long		flags;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	pch_udc_rmt_wakeup(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
/**
 * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
 *				is self powered or not
 * @gadget:	Reference to the gadget driver
 * @value:	Specifies self powered or not
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	If the gadget passed is NULL
 */
static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
{
	struct pch_udc_dev	*dev;

	if (!gadget)
		return -EINVAL;
	gadget->is_selfpowered = (value != 0);
	dev = container_of(gadget, struct pch_udc_dev, gadget);
	if (value)
		pch_udc_set_selfpowered(dev);
	else
		pch_udc_clear_selfpowered(dev);
	return 0;
}
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
/**
 * pch_udc_pcd_pullup() - This API is invoked to make the device
 *				visible/invisible to the host
 * @gadget:	Reference to the gadget driver
 * @is_on:	Specifies whether the pull up is made active or inactive
 *
 * Return codes:
 *	0:		Success
 *	-EINVAL:	If the gadget passed is NULL
 *
 * Runs under dev->lock; the lock is dropped around the gadget driver's
 * ->disconnect() callback, which must not be called with it held.
 */
static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
{
	struct pch_udc_dev	*dev;
	unsigned long		iflags;

	if (!gadget)
		return -EINVAL;

	dev = container_of(gadget, struct pch_udc_dev, gadget);

	spin_lock_irqsave(&dev->lock, iflags);
	if (is_on) {
		pch_udc_reconnect(dev);
	} else {
		if (dev->driver && dev->driver->disconnect) {
			spin_unlock_irqrestore(&dev->lock, iflags);
			dev->driver->disconnect(&dev->gadget);
			spin_lock_irqsave(&dev->lock, iflags);
		}
		pch_udc_set_disconnect(dev);
	}
	spin_unlock_irqrestore(&dev->lock, iflags);

	return 0;
}
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1206 {
1207 struct pch_udc_dev *dev;
1208
1209 if (!gadget)
1210 return -EINVAL;
1211 dev = container_of(gadget, struct pch_udc_dev, gadget);
1212 pch_udc_vbus_session(dev, is_active);
1213 return 0;
1214 }
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/**
 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
 *				SET_CONFIGURATION calls to
 *				specify how much power the device can consume
 * @gadget:	Reference to the gadget driver
 * @mA:		specifies the current limit in 2mA unit
 *
 * Return codes:
 *	-EOPNOTSUPP:	always — this hardware has no VBUS draw control
 */
static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	return -EOPNOTSUPP;
}
1231
1232 static int pch_udc_start(struct usb_gadget *g,
1233 struct usb_gadget_driver *driver);
1234 static int pch_udc_stop(struct usb_gadget *g);
1235
/* Gadget operations exposed to the UDC core. */
static const struct usb_gadget_ops pch_udc_ops = {
	.get_frame = pch_udc_pcd_get_frame,
	.wakeup = pch_udc_pcd_wakeup,
	.set_selfpowered = pch_udc_pcd_selfpowered,
	.pullup = pch_udc_pcd_pullup,
	.vbus_session = pch_udc_pcd_vbus_session,
	.vbus_draw = pch_udc_pcd_vbus_draw,
	.udc_start = pch_udc_start,
	.udc_stop = pch_udc_stop,
};
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1257 {
1258 int vbus = 0;
1259
1260 if (dev->vbus_gpio.port)
1261 vbus = gpiod_get_value(dev->vbus_gpio.port) ? 1 : 0;
1262 else
1263 vbus = -1;
1264
1265 return vbus;
1266 }
1267
1268
1269
1270
1271
1272
1273
1274 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1275 {
1276 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1277 struct pch_vbus_gpio_data, irq_work_fall);
1278 struct pch_udc_dev *dev =
1279 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1280 int vbus_saved = -1;
1281 int vbus;
1282 int count;
1283
1284 if (!dev->vbus_gpio.port)
1285 return;
1286
1287 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1288 count++) {
1289 vbus = pch_vbus_gpio_get_value(dev);
1290
1291 if ((vbus_saved == vbus) && (vbus == 0)) {
1292 dev_dbg(&dev->pdev->dev, "VBUS fell");
1293 if (dev->driver
1294 && dev->driver->disconnect) {
1295 dev->driver->disconnect(
1296 &dev->gadget);
1297 }
1298 if (dev->vbus_gpio.intr)
1299 pch_udc_init(dev);
1300 else
1301 pch_udc_reconnect(dev);
1302 return;
1303 }
1304 vbus_saved = vbus;
1305 mdelay(PCH_VBUS_INTERVAL);
1306 }
1307 }
1308
1309
1310
1311
1312
1313
1314
1315 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1316 {
1317 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1318 struct pch_vbus_gpio_data, irq_work_rise);
1319 struct pch_udc_dev *dev =
1320 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1321 int vbus;
1322
1323 if (!dev->vbus_gpio.port)
1324 return;
1325
1326 mdelay(PCH_VBUS_INTERVAL);
1327 vbus = pch_vbus_gpio_get_value(dev);
1328
1329 if (vbus == 1) {
1330 dev_dbg(&dev->pdev->dev, "VBUS rose");
1331 pch_udc_reconnect(dev);
1332 return;
1333 }
1334 }
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
/**
 * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
 * @irq:	Interrupt request number
 * @data:	Reference to the device structure
 *
 * Return codes:
 *	0: Success
 *	-EINVAL: GPIO port is invalid or can't be initialized.
 */
static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *)data;

	if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
		return IRQ_NONE;

	/* defer the (slow, sleeping) edge handling to a workqueue */
	if (pch_vbus_gpio_get_value(dev))
		schedule_work(&dev->vbus_gpio.irq_work_rise);
	else
		schedule_work(&dev->vbus_gpio.irq_work_fall);

	return IRQ_HANDLED;
}
1359
1360
1361
1362
1363
1364
1365
1366
1367
/**
 * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
 * @dev:		Reference to the driver structure
 *
 * Return codes:
 *	0: Success
 *	-EINVAL: GPIO port is invalid or can't be initialized.
 *
 * The GPIO is optional: if absent, VBUS detection is simply disabled.
 * If an IRQ can be obtained for the GPIO, edge-triggered interrupt mode is
 * used; otherwise only the falling-edge polling work is available.
 * NOTE(review): irq request failure is logged but still returns 0 —
 * presumably intentional best-effort behavior; confirm.
 */
static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
{
	struct device *d = &dev->pdev->dev;
	int err;
	int irq_num = 0;
	struct gpio_desc *gpiod;

	dev->vbus_gpio.port = NULL;
	dev->vbus_gpio.intr = 0;

	/* Retrieve the GPIO line from the USB gadget device */
	gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);
	gpiod_set_consumer_name(gpiod, "pch_vbus");

	dev->vbus_gpio.port = gpiod;
	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);

	irq_num = gpiod_to_irq(gpiod);
	if (irq_num > 0) {
		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
				  "vbus_detect", dev);
		if (!err) {
			dev->vbus_gpio.intr = irq_num;
			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
				  pch_vbus_gpio_work_rise);
		} else {
			pr_err("%s: can't request irq %d, err: %d\n",
				__func__, irq_num, err);
		}
	}

	return 0;
}
1404
1405
1406
1407
1408
/**
 * pch_vbus_gpio_free() - This API frees the GPIO port for VBUS detecting
 * @dev:	Reference to the driver structure
 *
 * Only the IRQ needs explicit release; the GPIO itself is devm-managed.
 */
static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
{
	if (dev->vbus_gpio.intr)
		free_irq(dev->vbus_gpio.intr, dev);
}
1414
1415
1416
1417
1418
1419
1420
1421
/**
 * complete_req() - This API is invoked from the driver when processing
 *			of a request is complete
 * @ep:		Reference to the endpoint structure
 * @req:	Reference to the request structure
 * @status:	Indicates the success/failure of completion
 *
 * Dequeues the request, unmaps its DMA buffer, and hands it back to the
 * gadget driver. dev->lock is dropped around the giveback callback (see
 * the __releases/__acquires annotations); ep->halted is temporarily forced
 * to 1 across the callback and then restored.
 */
static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
								 int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct pch_udc_dev	*dev;
	unsigned halted = ep->halted;

	list_del_init(&req->queue);

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
	ep->halted = 1;
	spin_unlock(&dev->lock);
	if (!ep->in)
		pch_udc_ep_clear_rrdy(ep);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
1448
1449
1450
1451
1452
1453 static void empty_req_queue(struct pch_udc_ep *ep)
1454 {
1455 struct pch_udc_request *req;
1456
1457 ep->halted = 1;
1458 while (!list_empty(&ep->queue)) {
1459 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1460 complete_req(ep, req, -ESHUTDOWN);
1461 }
1462 }
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
/**
 * pch_udc_free_dma_chain() - This function frees the DMA chain created
 *				for the request
 * @dev:	Reference to the driver structure
 * @req:	Reference to the request to be freed
 *
 * Walks the descriptor chain starting from the first descriptor's @next
 * link, freeing every descriptor except the head (which is owned by the
 * request itself), and resets chain_len to 1.
 *
 * NOTE(review): the chain links hold DMA addresses, yet phys_to_virt() is
 * used to recover the virtual address — this assumes DMA address == physical
 * address on this platform; confirm.
 */
static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
				   struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td = req->td_data;
	unsigned i = req->chain_len;

	dma_addr_t addr2;
	dma_addr_t addr = (dma_addr_t)td->next;
	td->next = 0x00;
	for (; i > 1; --i) {
		/* do not free first desc., will be done by free for request */
		td = phys_to_virt(addr);
		addr2 = (dma_addr_t)td->next;
		dma_pool_free(dev->data_requests, td, addr);
		addr = addr2;
	}
	req->chain_len = 1;
}
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
/**
 * pch_udc_create_dma_chain() - Build a descriptor chain covering the
 *				request buffer in @buf_len-sized pieces.
 * @ep:		endpoint the request belongs to
 * @req:	request to build the chain for (req->req.dma already mapped)
 * @buf_len:	payload size per descriptor (the endpoint's maxpacket)
 * @gfp_flags:	allocation flags for the extra descriptors
 *
 * Reuses req->td_data as the first descriptor; any previous chain is
 * freed first. The final descriptor gets PCH_UDC_DMA_LAST and its ->next
 * wraps back to the head (td_data_phys).
 *
 * Return: 0 on success, -ENOMEM if a descriptor allocation fails (the
 * partially built chain is freed and chain_len reset to 1).
 */
static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
				    struct pch_udc_request *req,
				    unsigned long buf_len,
				    gfp_t gfp_flags)
{
	struct pch_udc_data_dma_desc *td = req->td_data, *last;
	unsigned long bytes = req->req.length, i = 0;
	dma_addr_t dma_addr;
	unsigned len = 1;

	if (req->chain_len > 1)
		pch_udc_free_dma_chain(ep->dev, req);

	td->dataptr = req->req.dma;
	td->status = PCH_UDC_BS_HST_BSY;

	/* min(buf_len, bytes) sizes the short final chunk correctly */
	for (; ; bytes -= buf_len, ++len) {
		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
		if (bytes <= buf_len)
			break;
		last = td;
		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
				    &dma_addr);
		if (!td)
			goto nomem;
		i += buf_len;
		/* each descriptor points buf_len further into the buffer */
		td->dataptr = req->td_data->dataptr + i;
		last->next = dma_addr;
	}

	req->td_data_last = td;
	td->status |= PCH_UDC_DMA_LAST;
	td->next = req->td_data_phys;
	req->chain_len = len;
	return 0;

nomem:
	if (len > 1) {
		req->chain_len = len;
		pch_udc_free_dma_chain(ep->dev, req);
	}
	req->chain_len = 1;
	return -ENOMEM;
}
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1561 gfp_t gfp)
1562 {
1563 int retval;
1564
1565
1566 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1567 if (retval) {
1568 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1569 return retval;
1570 }
1571 if (ep->in)
1572 req->td_data->status = (req->td_data->status &
1573 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1574 return 0;
1575 }
1576
1577
1578
1579
1580
1581
1582
/**
 * process_zlp() - Complete a zero-length-packet request on ep0.
 * @ep:		control endpoint the ZLP was queued on
 * @req:	the zero-length request
 *
 * Completes the request immediately (there is no data stage), then
 * performs the control-transfer bookkeeping: acknowledge a pending
 * SET_CONFIGURATION/SET_INTERFACE via the CSR-done handshake, and
 * release the ep0-IN NAK if we were waiting for this ZLP ack.
 */
static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
{
	struct pch_udc_dev *dev = ep->dev;

	/* IN zero length packet */
	complete_req(ep, req, 0);

	/*
	 * if set_config or set_intf is waiting for an ack by a ZLP,
	 * signal the hardware that the status stage may proceed
	 */
	if (dev->set_cfg_not_acked) {
		pch_udc_set_csr_done(dev);
		dev->set_cfg_not_acked = 0;
	}
	/* setup command is ACKed: let ep0-IN accept the next token */
	if (!dev->stall && dev->waiting_zlp_ack) {
		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
		dev->waiting_zlp_ack = 0;
	}
}
1603
1604
1605
1606
1607
1608
/**
 * pch_udc_start_rxrequest() - Arm an OUT endpoint with a request's
 *			       descriptor chain and start RX DMA.
 * @ep:		OUT endpoint to start
 * @req:	request whose chain is handed to the controller
 *
 * RX DMA is paused while every descriptor in the chain is flipped to
 * host-ready, then the chain base is programmed, interrupts are
 * unmasked and DMA/receive-ready are re-enabled. The ordering of these
 * register writes is significant.
 */
static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
				    struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td_data;

	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	td_data = req->td_data;
	/* mark the whole chain host-ready before the controller sees it */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				   PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	/* Set the descriptor pointer */
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	req->dma_going = 1;
	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_clear_nak(ep);
	pch_udc_ep_set_rrdy(ep);
}
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
/**
 * pch_udc_pcd_ep_enable() - usb_ep_ops.enable: configure and arm an
 *			     endpoint according to @desc.
 * @usbep:	endpoint to enable (ep0 is rejected)
 * @desc:	endpoint descriptor supplied by the gadget driver
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ESHUTDOWN if no
 * gadget driver is bound or the bus speed is still unknown.
 */
static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
				 const struct usb_endpoint_descriptor *desc)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;

	if (!usbep || (usbep->name == ep0_string) || !desc ||
	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;
	ep->halted = 0;
	/* program the endpoint registers, then unmask its interrupt */
	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
/**
 * pch_udc_pcd_ep_disable() - usb_ep_ops.disable: shut an endpoint down.
 * @usbep:	endpoint to disable (ep0 and not-enabled endpoints are
 *		rejected)
 *
 * Aborts every queued request (-ESHUTDOWN), halts the endpoint, masks
 * its interrupt, and clears the descriptor so the endpoint reads as
 * disabled.
 *
 * Return: 0 on success, -EINVAL on bad arguments.
 */
static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	if ((usbep->name == ep0_string) || !ep->ep.desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, iflags);
	empty_req_queue(ep);
	ep->halted = 1;
	pch_udc_ep_disable(ep);
	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	ep->ep.desc = NULL;
	INIT_LIST_HEAD(&ep->queue);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1712 gfp_t gfp)
1713 {
1714 struct pch_udc_request *req;
1715 struct pch_udc_ep *ep;
1716 struct pch_udc_data_dma_desc *dma_desc;
1717
1718 if (!usbep)
1719 return NULL;
1720 ep = container_of(usbep, struct pch_udc_ep, ep);
1721 req = kzalloc(sizeof *req, gfp);
1722 if (!req)
1723 return NULL;
1724 req->req.dma = DMA_ADDR_INVALID;
1725 INIT_LIST_HEAD(&req->queue);
1726 if (!ep->dev->dma_addr)
1727 return &req->req;
1728
1729 dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1730 &req->td_data_phys);
1731 if (NULL == dma_desc) {
1732 kfree(req);
1733 return NULL;
1734 }
1735
1736 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1737 dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
1738 req->td_data = dma_desc;
1739 req->td_data_last = dma_desc;
1740 req->chain_len = 1;
1741 return &req->req;
1742 }
1743
1744
1745
1746
1747
1748
1749
1750 static void pch_udc_free_request(struct usb_ep *usbep,
1751 struct usb_request *usbreq)
1752 {
1753 struct pch_udc_ep *ep;
1754 struct pch_udc_request *req;
1755 struct pch_udc_dev *dev;
1756
1757 if (!usbep || !usbreq)
1758 return;
1759 ep = container_of(usbep, struct pch_udc_ep, ep);
1760 req = container_of(usbreq, struct pch_udc_request, req);
1761 dev = ep->dev;
1762 if (!list_empty(&req->queue))
1763 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1764 __func__, usbep->name, req);
1765 if (req->td_data != NULL) {
1766 if (req->chain_len > 1)
1767 pch_udc_free_dma_chain(ep->dev, req);
1768 dma_pool_free(ep->dev->data_requests, req->td_data,
1769 req->td_data_phys);
1770 }
1771 kfree(req);
1772 }
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1786 gfp_t gfp)
1787 {
1788 int retval = 0;
1789 struct pch_udc_ep *ep;
1790 struct pch_udc_dev *dev;
1791 struct pch_udc_request *req;
1792 unsigned long iflags;
1793
1794 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1795 return -EINVAL;
1796 ep = container_of(usbep, struct pch_udc_ep, ep);
1797 dev = ep->dev;
1798 if (!ep->ep.desc && ep->num)
1799 return -EINVAL;
1800 req = container_of(usbreq, struct pch_udc_request, req);
1801 if (!list_empty(&req->queue))
1802 return -EINVAL;
1803 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1804 return -ESHUTDOWN;
1805 spin_lock_irqsave(&dev->lock, iflags);
1806
1807 retval = usb_gadget_map_request(&dev->gadget, usbreq, ep->in);
1808 if (retval)
1809 goto probe_end;
1810 if (usbreq->length > 0) {
1811 retval = prepare_dma(ep, req, GFP_ATOMIC);
1812 if (retval)
1813 goto probe_end;
1814 }
1815 usbreq->actual = 0;
1816 usbreq->status = -EINPROGRESS;
1817 req->dma_done = 0;
1818 if (list_empty(&ep->queue) && !ep->halted) {
1819
1820 if (!usbreq->length) {
1821 process_zlp(ep, req);
1822 retval = 0;
1823 goto probe_end;
1824 }
1825 if (!ep->in) {
1826 pch_udc_start_rxrequest(ep, req);
1827 } else {
1828
1829
1830
1831
1832
1833 pch_udc_wait_ep_stall(ep);
1834 pch_udc_ep_clear_nak(ep);
1835 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1836 }
1837 }
1838
1839 if (req != NULL)
1840 list_add_tail(&req->queue, &ep->queue);
1841
1842 probe_end:
1843 spin_unlock_irqrestore(&dev->lock, iflags);
1844 return retval;
1845 }
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1858 struct usb_request *usbreq)
1859 {
1860 struct pch_udc_ep *ep;
1861 struct pch_udc_request *req;
1862 unsigned long flags;
1863 int ret = -EINVAL;
1864
1865 ep = container_of(usbep, struct pch_udc_ep, ep);
1866 if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1867 return ret;
1868 req = container_of(usbreq, struct pch_udc_request, req);
1869 spin_lock_irqsave(&ep->dev->lock, flags);
1870
1871 list_for_each_entry(req, &ep->queue, queue) {
1872 if (&req->req == usbreq) {
1873 pch_udc_ep_set_nak(ep);
1874 if (!list_empty(&req->queue))
1875 complete_req(ep, req, -ECONNRESET);
1876 ret = 0;
1877 break;
1878 }
1879 }
1880 spin_unlock_irqrestore(&ep->dev->lock, flags);
1881 return ret;
1882 }
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
/**
 * pch_udc_pcd_set_halt() - usb_ep_ops.set_halt: stall or un-stall an
 *			    endpoint.
 * @usbep:	endpoint to (un)halt
 * @halt:	nonzero to set the stall, zero to clear it
 *
 * Only acts on an endpoint whose queue is empty; otherwise -EAGAIN is
 * returned so the caller can retry after completions drain.
 *
 * NOTE(review): the guard "!ep->ep.desc && !ep->num" differs from
 * queue/dequeue, which use "... && ep->num". As written it rejects ep0
 * (which never has a descriptor) rather than unconfigured non-ep0
 * endpoints — looks inverted; confirm intended behavior before changing.
 *
 * Return: 0 on success, -EINVAL/-ESHUTDOWN on bad state, -EAGAIN if
 * requests are still pending.
 */
static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (list_empty(&ep->queue)) {
		if (halt) {
			/* a control-endpoint stall also flags the device */
			if (ep->num == PCH_UDC_EP0)
				ep->dev->stall = 1;
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(
				ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			pch_udc_ep_clear_stall(ep);
		}
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
/**
 * pch_udc_pcd_set_wedge() - usb_ep_ops.set_wedge: stall an endpoint so
 *			     that only a reset/reconfigure clears it.
 * @usbep:	endpoint to wedge
 *
 * Like set_halt(1) but additionally sets prot_stall so the service
 * routines keep re-stalling until the protocol stall is lifted.
 *
 * NOTE(review): same "!ep->ep.desc && !ep->num" guard as set_halt() —
 * see the review note there about the possibly inverted ep->num test.
 *
 * Return: 0 on success, -EINVAL/-ESHUTDOWN on bad state, -EAGAIN if
 * requests are still pending.
 */
static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else {
		if (ep->num == PCH_UDC_EP0)
			ep->dev->stall = 1;
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
		ep->dev->prot_stall = 1;
		ret = 0;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}
1963
1964
1965
1966
1967
1968 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
1969 {
1970 struct pch_udc_ep *ep;
1971
1972 if (!usbep)
1973 return;
1974
1975 ep = container_of(usbep, struct pch_udc_ep, ep);
1976 if (ep->ep.desc || !ep->num)
1977 pch_udc_ep_fifo_flush(ep, ep->in);
1978 }
1979
1980 static const struct usb_ep_ops pch_udc_ep_ops = {
1981 .enable = pch_udc_pcd_ep_enable,
1982 .disable = pch_udc_pcd_ep_disable,
1983 .alloc_request = pch_udc_alloc_request,
1984 .free_request = pch_udc_free_request,
1985 .queue = pch_udc_pcd_queue,
1986 .dequeue = pch_udc_pcd_dequeue,
1987 .set_halt = pch_udc_pcd_set_halt,
1988 .set_wedge = pch_udc_pcd_set_wedge,
1989 .fifo_status = NULL,
1990 .fifo_flush = pch_udc_pcd_fifo_flush,
1991 };
1992
1993
1994
1995
1996
1997 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
1998 {
1999 static u32 pky_marker;
2000
2001 if (!td_stp)
2002 return;
2003 td_stp->reserved = ++pky_marker;
2004 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2005 td_stp->status = PCH_UDC_BS_HST_RDY;
2006 }
2007
2008
2009
2010
2011
2012
/**
 * pch_udc_start_next_txrequest() - Hand the first queued IN request's
 *				    descriptor chain to the controller.
 * @ep:		IN endpoint to service
 *
 * No-op if a poll-demand is already pending, the queue is empty, the
 * head request's DMA is already running, or it has no descriptors.
 * Otherwise marks the whole chain host-ready, programs the chain base,
 * enables TX DMA + poll demand, and clears NAK. The register write
 * ordering here is significant.
 */
static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_data_dma_desc *td_data;

	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
		return;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if (req->dma_going)
		return;
	if (!req->td_data)
		return;
	pch_udc_wait_ep_stall(ep);
	req->dma_going = 1;
	/* clear the descriptor pointer before rewriting the chain */
	pch_udc_ep_set_ddptr(ep, 0);
	td_data = req->td_data;
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				   PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
	pch_udc_ep_set_pd(ep);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	pch_udc_ep_clear_nak(ep);
}
2047
2048
2049
2050
2051
/**
 * pch_udc_complete_transfer() - Finish the head IN request once its DMA
 *				 has completed.
 * @ep:		IN endpoint to service
 *
 * Bails out if the queue is empty, the last descriptor is not yet
 * DMA-done, or the controller reported a receive/transmit error (logged).
 * On success the request is reported fully transferred and completed;
 * the next request (if any) is then prepared, else the endpoint
 * interrupt is masked.
 */
static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	     PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
			"epstatus=0x%08x\n",
		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
		       (int)(ep->epsts));
		return;
	}

	/* IN transfer finished: the whole length went out */
	req->req.actual = req->req.length;
	/* reclaim both ends of the chain for the host before completing */
	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	complete_req(ep, req, 0);
	req->dma_going = 0;
	if (!list_empty(&ep->queue)) {
		pch_udc_wait_ep_stall(ep);
		pch_udc_ep_clear_nak(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	} else {
		pch_udc_disable_ep_interrupts(ep->dev,
					      PCH_UDC_EPINT(ep->in, ep->num));
	}
}
2087
2088
2089
2090
2091
/**
 * pch_udc_complete_receiver() - Finish the head OUT request after RX DMA.
 * @ep:		OUT endpoint to service
 *
 * Pauses RX DMA, walks the descriptor chain to find the DMA_LAST
 * descriptor that completed, extracts the received byte count from its
 * status word, completes the request, and starts the next queued RX
 * request if one is pending. Errors (bad RXTX status, chain walked to
 * the end without a completed LAST descriptor) are logged and abort the
 * completion.
 */
static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;
	unsigned int count;
	struct pch_udc_data_dma_desc *td;
	dma_addr_t addr;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_set_ddptr(ep, 0);
	/* short-cut: start at the tail if it already finished */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
	    PCH_UDC_BS_DMA_DONE)
		td = req->td_data_last;
	else
		td = req->td_data;

	while (1) {
		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
				"epstatus=0x%08x\n",
				(req->td_data->status & PCH_UDC_RXTX_STS),
				(int)(ep->epsts));
			return;
		}
		/* the completed LAST descriptor carries the byte count */
		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
			if (td->status & PCH_UDC_DMA_LAST) {
				count = td->status & PCH_UDC_RXTX_BYTES;
				break;
			}
		if (td == req->td_data_last) {
			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
			return;
		}
		addr = (dma_addr_t)td->next;
		td = phys_to_virt(addr);
	}

	/* a zero count on a max-sized request means a full-length packet */
	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
		count = UDC_DMA_MAXPACKET;
	/* NOTE(review): re-marks the head as LAST and reclaims td for the
	 * host before handing the request back — verify against datasheet. */
	req->td_data->status |= PCH_UDC_DMA_LAST;
	td->status |= PCH_UDC_BS_HST_BSY;

	req->dma_going = 0;
	req->req.actual = count;
	complete_req(ep, req, 0);

	/* If there is a new/failed requests try that now */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
		pch_udc_start_rxrequest(ep, req);
	}
}
2147
2148
2149
2150
2151
2152
2153
/**
 * pch_udc_svc_data_in() - Service an interrupt on a data IN endpoint.
 * @dev:	reference to the device structure
 * @ep_num:	endpoint number (index into the IN half of dev->ep[])
 *
 * Consumes the latched endpoint status (ep->epsts), handles stall
 * set/clear conditions, completes a finished transfer on TDC, and kicks
 * the next TX request on a bare IN-token event.
 */
static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
		return;
	/* buffer-not-available and host-error events are ignored here */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		/* host issued SET_FEATURE(HALT): stall the endpoint */
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		/* CLEAR_FEATURE(HALT): honour unless a protocol stall holds */
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
					PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	if (epsts & UDC_EPSTS_TDC)
		pch_udc_complete_transfer(ep);
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2192
2193
2194
2195
2196
2197
/**
 * pch_udc_svc_data_out() - Service an interrupt on a data OUT endpoint.
 * @dev:	reference to the device structure
 * @ep_num:	endpoint number (index into the OUT half of dev->ep[])
 *
 * Consumes the latched endpoint status (ep->epsts). A BNA with an
 * unfinished head request restarts its RX DMA; stall conditions are
 * handled like the IN path; a completed DATA event finishes the
 * receiver. With an empty queue, RX DMA is re-enabled so SETUP traffic
 * keeps flowing.
 */
static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_request *req = NULL;

	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
	epsts = ep->epsts;
	ep->epsts = 0;

	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
		/* next request */
		req = list_entry(ep->queue.next, struct pch_udc_request,
				 queue);
		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
		     PCH_UDC_BS_DMA_DONE) {
			if (!req->dma_going)
				pch_udc_start_rxrequest(ep, req);
			return;
		}
	}
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		/* host issued SET_FEATURE(HALT) */
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		/* CLEAR_FEATURE(HALT): honour unless a protocol stall holds */
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_DATA) {
		if (ep->dev->prot_stall == 1) {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			pch_udc_complete_receiver(ep);
		}
	}
	if (list_empty(&ep->queue))
		pch_udc_set_dma(dev, DMA_DIR_RX);
}
2248
/**
 * pch_udc_gadget_setup() - Forward the latched SETUP packet to the
 *			    gadget driver's ->setup() handler.
 * @dev:	reference to the device structure
 *
 * Called with dev->lock held (__must_hold); the lock is dropped around
 * the callback because the gadget driver may queue requests from it.
 *
 * Return: the gadget driver's setup() result, or -ESHUTDOWN if no
 * driver is bound.
 */
static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
__must_hold(&dev->lock)
{
	int rc;

	/* site-specific hardware may disconnect the driver at any time */
	if (!dev->driver)
		return -ESHUTDOWN;

	spin_unlock(&dev->lock);
	rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
	spin_lock(&dev->lock);
	return rc;
}
2263
2264
2265
2266
2267
/**
 * pch_udc_svc_control_in() - Service an interrupt on the control IN
 *			      endpoint (ep0-IN).
 * @dev:	reference to the device structure
 *
 * On transmit-DMA-complete (and no pending stall) finishes the IN
 * transfer and re-arms ep0-OUT for the status/next-SETUP stage; on a
 * bare IN-token event kicks the next queued TX request.
 */
static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_ep *ep_out;

	ep = &dev->ep[UDC_EP0IN_IDX];
	ep_out = &dev->ep[UDC_EP0OUT_IDX];
	epsts = ep->epsts;
	ep->epsts = 0;

	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_XFERDONE)))
		return;
	/* buffer-not-available and host-error events are ignored here */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
		pch_udc_complete_transfer(ep);
		/* re-arm ep0-OUT so the host can send the status stage */
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		ep_out->td_data->status = (ep_out->td_data->status &
					   ~PCH_UDC_BUFF_STS) |
					   PCH_UDC_BS_HST_RDY;
		pch_udc_ep_clear_nak(ep_out);
		pch_udc_set_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_rrdy(ep_out);
	}
	/* On IN interrupt, provide data if we have any */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
	    !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2302
2303
2304
2305
2306
2307
/**
 * pch_udc_svc_control_out() - Service an interrupt on the control OUT
 *			       endpoint (ep0-OUT).
 * @dev:	reference to the device structure
 *
 * On a SETUP event: latches the SETUP packet, resets ep0 state, chooses
 * the ep0 direction, forwards the request to the gadget driver (which
 * drops/retakes dev->lock — hence the annotations), and arms the data
 * or status stage depending on the driver's answer. On a DATA event:
 * runs the normal OUT-data service path for ep0. Always ends by setting
 * receive-ready for the next ep0-OUT token.
 */
static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
__releases(&dev->lock)
__acquires(&dev->lock)
{
	u32 stat;
	int setup_supported;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EP0OUT_IDX];
	stat = ep->epsts;
	ep->epsts = 0;

	/* If setup data */
	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_SETUP) {
		dev->stall = 0;
		dev->ep[UDC_EP0IN_IDX].halted = 0;
		dev->ep[UDC_EP0OUT_IDX].halted = 0;
		dev->setup_data = ep->td_stp->request;
		pch_udc_init_setup_buff(ep->td_stp);
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
				      dev->ep[UDC_EP0IN_IDX].in);
		/* pick ep0 direction for the upcoming data stage */
		if ((dev->setup_data.bRequestType & USB_DIR_IN))
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
		else /* OUT */
			dev->gadget.ep0 = &ep->ep;
		/* vendor escape (0x21/0xFF) lifts a protocol stall */
		if ((dev->setup_data.bRequestType == 0x21) &&
		    (dev->setup_data.bRequest == 0xFF))
			dev->prot_stall = 0;
		/* this delegates to the gadget driver; lock is cycled */
		setup_supported = pch_udc_gadget_setup(dev);

		if (dev->setup_data.bRequestType & USB_DIR_IN) {
			ep->td_data->status = (ep->td_data->status &
					       ~PCH_UDC_BUFF_STS) |
					       PCH_UDC_BS_HST_RDY;
			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
		}
		/* ep0 in returns data on IN phase */
		if (setup_supported >= 0 && setup_supported <
					    UDC_EP0IN_MAX_PKT_SIZE) {
			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
			/* Gadget would have queued a request when
			 * we called the setup */
			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
				pch_udc_set_dma(dev, DMA_DIR_RX);
				pch_udc_ep_clear_nak(ep);
			}
		} else if (setup_supported < 0) {
			/* if unsupported request, then stall */
			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
			pch_udc_enable_ep_interrupts(ep->dev,
						PCH_UDC_EPINT(ep->in, ep->num));
			dev->stall = 0;
			pch_udc_set_dma(dev, DMA_DIR_RX);
		} else {
			dev->waiting_zlp_ack = 1;
		}
	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_ddptr(ep, 0);
		if (!list_empty(&ep->queue)) {
			ep->epsts = stat;
			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
		}
		pch_udc_set_dma(dev, DMA_DIR_RX);
	}
	pch_udc_ep_set_rrdy(ep);
}
2380
2381
2382
2383
2384
2385
2386
2387
2388 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2389 {
2390 struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2391 if (list_empty(&ep->queue))
2392 return;
2393 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2394 pch_udc_ep_clear_nak(ep);
2395 }
2396
2397
2398
2399
2400
2401
2402 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2403 {
2404 int i;
2405 struct pch_udc_ep *ep;
2406
2407 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2408
2409 if (ep_intr & (0x1 << i)) {
2410 ep = &dev->ep[UDC_EPIN_IDX(i)];
2411 ep->epsts = pch_udc_read_ep_status(ep);
2412 pch_udc_clear_ep_status(ep, ep->epsts);
2413 }
2414
2415 if (ep_intr & (0x10000 << i)) {
2416 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2417 ep->epsts = pch_udc_read_ep_status(ep);
2418 pch_udc_clear_ep_status(ep, ep->epsts);
2419 }
2420 }
2421 }
2422
2423
2424
2425
2426
2427
/**
 * pch_udc_activate_control_ep() - (Re)initialize both halves of ep0
 *				   after reset/enumeration.
 * @dev:	reference to the device structure
 *
 * Clears control registers, flushes FIFOs, programs buffer/packet
 * sizes, re-arms the SETUP descriptor and the single ep0-OUT data
 * descriptor (which loops back onto itself), and clears NAK on ep0-OUT.
 */
static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	u32 val;

	/* Setup the IN endpoint */
	ep = &dev->ep[UDC_EP0IN_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
	/* ep0-IN uses no DMA descriptors */
	ep->td_data = NULL;
	ep->td_stp = NULL;
	ep->td_data_phys = 0;
	ep->td_stp_phys = 0;

	/* Setup the OUT endpoint */
	ep = &dev->ep[UDC_EP0OUT_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);

	/* Initialize the SETUP buffer */
	pch_udc_init_setup_buff(ep->td_stp);
	/* Write the pointer address of dma descriptor */
	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
	/* Write the pointer address of Setup descriptor */
	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);

	/* Single self-looping data descriptor for ep0-OUT */
	ep->td_data->status = PCH_UDC_DMA_LAST;
	ep->td_data->dataptr = dev->dma_addr;
	ep->td_data->next = ep->td_data_phys;

	pch_udc_ep_clear_nak(ep);
}
2468
2469
2470
2471
2472
2473
/**
 * pch_udc_svc_ur_interrupt() - Handle a USB bus-reset interrupt.
 * @dev:	reference to the device structure
 *
 * Stops both DMA directions, masks and clears every endpoint interrupt,
 * resets every endpoint's control/status/descriptor registers and
 * device-level state flags, NAKs+flushes the in-use endpoints while
 * aborting their queued requests, and finally notifies the gadget core
 * of the reset (lock is cycled for the callback).
 */
static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	int i;

	pch_udc_clear_dma(dev, DMA_DIR_TX);
	pch_udc_clear_dma(dev, DMA_DIR_RX);
	/* Mask all endpoint interrupts */
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
	/* clear all endpoint interrupts */
	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		ep = &dev->ep[i];
		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
		pch_udc_clear_ep_control(ep);
		pch_udc_ep_set_ddptr(ep, 0);
		pch_udc_write_csr(ep->dev, 0x00, i);
	}
	dev->stall = 0;
	dev->prot_stall = 0;
	dev->waiting_zlp_ack = 0;
	dev->set_cfg_not_acked = 0;

	/* disable ep to empty req queue. Skip the control EP's */
	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
		ep = &dev->ep[i];
		pch_udc_ep_set_nak(ep);
		pch_udc_ep_fifo_flush(ep, ep->in);
		/* Complete request queue */
		empty_req_queue(ep);
	}
	if (dev->driver) {
		spin_unlock(&dev->lock);
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_lock(&dev->lock);
	}
}
2512
2513
2514
2515
2516
2517
/**
 * pch_udc_svc_enum_interrupt() - Handle an enumeration-done interrupt.
 * @dev:	reference to the device structure
 *
 * Reads the negotiated bus speed from the device status register,
 * records it in the gadget, re-activates ep0 and re-enables DMA and
 * the standard device interrupts.
 */
static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
{
	u32 dev_stat, dev_speed;
	u32 speed = USB_SPEED_FULL;

	dev_stat = pch_udc_read_device_status(dev);
	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
	switch (dev_speed) {
	case UDC_DEVSTS_ENUM_SPEED_HIGH:
		speed = USB_SPEED_HIGH;
		break;
	case UDC_DEVSTS_ENUM_SPEED_FULL:
		speed = USB_SPEED_FULL;
		break;
	case UDC_DEVSTS_ENUM_SPEED_LOW:
		speed = USB_SPEED_LOW;
		break;
	default:
		/* hardware reports only the three speeds above */
		BUG();
	}
	dev->gadget.speed = speed;
	pch_udc_activate_control_ep(dev);
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
	pch_udc_set_dma(dev, DMA_DIR_TX);
	pch_udc_set_dma(dev, DMA_DIR_RX);
	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));

	/* enable device interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
}
2551
2552
2553
2554
2555
2556
/**
 * pch_udc_svc_intf_interrupt() - Handle a SET_INTERFACE interrupt.
 * @dev:	reference to the device structure
 *
 * The controller decodes SET_INTERFACE in hardware; this routine reads
 * the chosen interface/alternate setting from the status register,
 * synthesizes an equivalent SETUP packet for the gadget driver,
 * mirrors the values into the ep0 CSR, un-stalls all in-use endpoints,
 * and forwards the synthesized request to the gadget driver.
 */
static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
{
	u32 reg, dev_stat = 0;
	int i;

	dev_stat = pch_udc_read_device_status(dev);
	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
							 UDC_DEVSTS_INTF_SHIFT;
	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
							 UDC_DEVSTS_ALT_SHIFT;
	/* gadget must ACK via CSR-done (see process_zlp()) */
	dev->set_cfg_not_acked = 1;
	/* Construct the usb request for gadget driver and inform it */
	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
	/* programm the Endpoint Cfg registers */
	/* Only one end point cfg register */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		/* clear stall bits */
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;
	pch_udc_gadget_setup(dev);
}
2590
2591
2592
2593
2594
2595
/**
 * pch_udc_svc_cfg_interrupt() - Handle a SET_CONFIGURATION interrupt.
 * @dev:	reference to the device structure
 *
 * The controller decodes SET_CONFIGURATION in hardware; this routine
 * reads the chosen configuration from the status register, synthesizes
 * an equivalent SETUP packet, mirrors the value into the ep0 CSR,
 * un-stalls all in-use endpoints, and forwards the request to the
 * gadget driver (ACKed later via CSR-done, see process_zlp()).
 */
static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
{
	int i;
	u32 reg, dev_stat = 0;

	dev_stat = pch_udc_read_device_status(dev);
	dev->set_cfg_not_acked = 1;
	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
				UDC_DEVSTS_CFG_SHIFT;
	/* make usb request for gadget driver */
	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
	/* program the NE registers */
	/* Only one end point cfg register */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		/* clear stall bits */
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;

	/* call gadget zero with setup data received */
	pch_udc_gadget_setup(dev);
}
2625
2626
2627
2628
2629
2630
2631
/**
 * pch_udc_dev_isr() - Dispatch device-level (non-endpoint) interrupts.
 * @dev:	reference to the device structure
 * @dev_intr:	latched device interrupt status bits
 *
 * Handles bus reset, enumeration done, SET_INTERFACE/SET_CONFIGURATION,
 * and suspend (including VBUS-loss-driven disconnect/reconnect). SOF,
 * idle-state and remote-wakeup bits are only logged. Called under
 * dev->lock; the lock is cycled around gadget callbacks.
 */
static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
{
	int vbus;

	/* USB Reset Interrupt */
	if (dev_intr & UDC_DEVINT_UR) {
		pch_udc_svc_ur_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
	}
	/* Enumeration Done Interrupt */
	if (dev_intr & UDC_DEVINT_ENUM) {
		pch_udc_svc_enum_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
	}
	/* Set Interface Interrupt */
	if (dev_intr & UDC_DEVINT_SI)
		pch_udc_svc_intf_interrupt(dev);
	/* Set Config Interrupt */
	if (dev_intr & UDC_DEVINT_SC)
		pch_udc_svc_cfg_interrupt(dev);
	/* USB Suspend interrupt */
	if (dev_intr & UDC_DEVINT_US) {
		if (dev->driver
			&& dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}

		vbus = pch_vbus_gpio_get_value(dev);
		if ((dev->vbus_session == 0)
			&& (vbus != 1)) {
			/* suspend with no VBUS: treat as disconnect */
			if (dev->driver && dev->driver->disconnect) {
				spin_unlock(&dev->lock);
				dev->driver->disconnect(&dev->gadget);
				spin_lock(&dev->lock);
			}
			pch_udc_reconnect(dev);
		} else if ((dev->vbus_session == 0)
			&& (vbus == 1)
			&& !dev->vbus_gpio.intr)
			/* no VBUS irq line: poll the fall via workqueue */
			schedule_work(&dev->vbus_gpio.irq_work_fall);

		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
	}
	/* Clear the SOF interrupt, if enabled */
	if (dev_intr & UDC_DEVINT_SOF)
		dev_dbg(&dev->pdev->dev, "SOF\n");
	/* ES interrupt, IDLE > 3ms on the USB */
	if (dev_intr & UDC_DEVINT_ES)
		dev_dbg(&dev->pdev->dev, "ES\n");
	/* RWKP interrupt */
	if (dev_intr & UDC_DEVINT_RWKP)
		dev_dbg(&dev->pdev->dev, "RWKP\n");
}
2687
2688
2689
2690
2691
2692
/**
 * pch_udc_isr() - Top-level interrupt handler for the UDC.
 * @irq:	interrupt line number
 * @pdev:	opaque pointer to our struct pch_udc_dev
 *
 * Reads and acknowledges the device and endpoint interrupt registers,
 * detects a hung controller (all three registers reading identically)
 * and soft-resets it, then dispatches device-level events followed by
 * ep0 and data-endpoint service routines under dev->lock.
 *
 * Return: IRQ_HANDLED if any work was done (or the hang was reset),
 * IRQ_NONE if no interrupt bit was set.
 */
static irqreturn_t pch_udc_isr(int irq, void *pdev)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
	u32 dev_intr, ep_intr;
	int i;

	dev_intr = pch_udc_read_device_interrupts(dev);
	ep_intr = pch_udc_read_ep_interrupts(dev);

	/* For a hot plug, this find that the controller is hung up. */
	if (dev_intr == ep_intr)
		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			/* The controller is reset */
			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
			return IRQ_HANDLED;
		}
	if (dev_intr)
		/* Clear device interrupts */
		pch_udc_write_device_interrupts(dev, dev_intr);
	if (ep_intr)
		/* Clear ep interrupts */
		pch_udc_write_ep_interrupts(dev, ep_intr);
	if (!dev_intr && !ep_intr)
		return IRQ_NONE;
	spin_lock(&dev->lock);
	if (dev_intr)
		pch_udc_dev_isr(dev, dev_intr);
	if (ep_intr) {
		pch_udc_read_all_epstatus(dev, ep_intr);
		/* Process Control In interrupts, if present */
		if (ep_intr & UDC_EPINT_IN_EP0) {
			pch_udc_svc_control_in(dev);
			pch_udc_postsvc_epinters(dev, 0);
		}
		/* Process Control Out interrupts, if present */
		if (ep_intr & UDC_EPINT_OUT_EP0)
			pch_udc_svc_control_out(dev);
		/* Process data in end point interrupts */
		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
			if (ep_intr & (1 <<  i)) {
				pch_udc_svc_data_in(dev, i);
				pch_udc_postsvc_epinters(dev, i);
			}
		}
		/* Process data out end point interrupts */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
						 PCH_UDC_USED_EP_NUM); i++)
			if (ep_intr & (1 << i))
				pch_udc_svc_data_out(dev, i -
							 UDC_EPINT_OUT_SHIFT);
	}
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
2748
2749
2750
2751
2752
2753 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2754 {
2755
2756 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2757 UDC_EPINT_OUT_EP0);
2758
2759 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2760 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2761 UDC_DEVINT_SI | UDC_DEVINT_SC);
2762 }
2763
2764
2765
2766
2767
/**
 * pch_udc_pcd_reinit() - Rebuild the software endpoint table and gadget
 *			  endpoint list.
 * @dev:	reference to the device structure
 *
 * Initializes all PCH_UDC_EP_NUM endpoint structures (alternating
 * IN/OUT: even index = IN, odd = OUT), computes each endpoint's
 * register window offset, fills in capability flags and maxpacket
 * limits, and wires ep0-IN up as gadget.ep0 (removed from the general
 * endpoint list, as the gadget core expects).
 */
static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
{
	const char *const ep_string[] = {
		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
		"ep15in", "ep15out",
	};
	int i;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* Initialize the endpoints structures */
	memset(dev->ep, 0, sizeof dev->ep);
	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		struct pch_udc_ep *ep = &dev->ep[i];
		ep->dev = dev;
		ep->halted = 1;
		ep->num = i / 2;
		/* even index -> IN endpoint */
		ep->in = ~i & 1;
		ep->ep.name = ep_string[i];
		ep->ep.ops = &pch_udc_ep_ops;
		if (ep->in) {
			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_in = true;
		} else {
			/* OUT register bank sits UDC_EPINT_OUT_SHIFT higher */
			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
					  UDC_EP_REG_SHIFT;
			ep->ep.caps.dir_out = true;
		}
		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
			ep->ep.caps.type_control = true;
		} else {
			ep->ep.caps.type_iso = true;
			ep->ep.caps.type_bulk = true;
			ep->ep.caps.type_int = true;
		}
		/* need to set ep->ep.maxpacket and set Default Configuration?*/
		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
		INIT_LIST_HEAD(&ep->queue);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);

	/* remove ep0 in and out from the list.  They have own pointer */
	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);

	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}
2823
2824
2825
2826
2827
2828
2829
2830
2831
/*
 * pch_udc_pcd_init() - Initialize the controller hardware and the
 *			peripheral controller driver state.
 * @dev:	Reference to the driver structure
 *
 * Return: 0 on success, negative errno from pch_vbus_gpio_init() otherwise.
 */
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	int err;

	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);

	err = pch_vbus_gpio_init(dev);
	if (!err)
		return 0;

	/* VBUS GPIO setup failed: undo the hardware initialization. */
	pch_udc_exit(dev);
	return err;
}
2844
2845
2846
2847
2848
2849 static int init_dma_pools(struct pch_udc_dev *dev)
2850 {
2851 struct pch_udc_stp_dma_desc *td_stp;
2852 struct pch_udc_data_dma_desc *td_data;
2853 void *ep0out_buf;
2854
2855
2856 dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2857 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2858 if (!dev->data_requests) {
2859 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2860 __func__);
2861 return -ENOMEM;
2862 }
2863
2864
2865 dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2866 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2867 if (!dev->stp_requests) {
2868 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2869 __func__);
2870 return -ENOMEM;
2871 }
2872
2873 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2874 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2875 if (!td_stp) {
2876 dev_err(&dev->pdev->dev,
2877 "%s: can't allocate setup dma descriptor\n", __func__);
2878 return -ENOMEM;
2879 }
2880 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2881
2882
2883 td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2884 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2885 if (!td_data) {
2886 dev_err(&dev->pdev->dev,
2887 "%s: can't allocate data dma descriptor\n", __func__);
2888 return -ENOMEM;
2889 }
2890 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2891 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2892 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2893 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2894 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2895
2896 ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2897 GFP_KERNEL);
2898 if (!ep0out_buf)
2899 return -ENOMEM;
2900 dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2901 UDC_EP0OUT_BUFF_SIZE * 4,
2902 DMA_FROM_DEVICE);
2903 return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
2904 }
2905
2906 static int pch_udc_start(struct usb_gadget *g,
2907 struct usb_gadget_driver *driver)
2908 {
2909 struct pch_udc_dev *dev = to_pch_udc(g);
2910
2911 driver->driver.bus = NULL;
2912 dev->driver = driver;
2913
2914
2915 pch_udc_setup_ep0(dev);
2916
2917
2918 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2919 pch_udc_clear_disconnect(dev);
2920
2921 dev->connected = 1;
2922 return 0;
2923 }
2924
/*
 * pch_udc_stop() - usb_gadget_ops stop hook: unbind the gadget driver and
 *		    take the device off the bus.
 * @g:	The gadget
 *
 * Return: always 0.
 */
static int pch_udc_stop(struct usb_gadget *g)
{
	struct pch_udc_dev *dev = to_pch_udc(g);

	/* Mask all device-level interrupts before dropping the driver. */
	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);

	/* Detach the gadget driver. */
	dev->driver = NULL;
	dev->connected = 0;

	/* Assert soft disconnect so the host sees the device go away. */
	pch_udc_set_disconnect(dev);

	return 0;
}
2940
/* devm action callback: drop a previously registered GPIO lookup table. */
static void pch_vbus_gpio_remove_table(void *table)
{
	gpiod_remove_lookup_table(table);
}
2945
/*
 * Register a GPIO lookup table and arrange for it to be removed
 * automatically when @d is unbound (or if adding the action fails).
 */
static int pch_vbus_gpio_add_table(struct device *d, void *table)
{
	gpiod_add_lookup_table(table);
	return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, table);
}
2951
/* GPIO lookup for the Intel MinnowBoard: the UDC function at PCI 02:02.4. */
static struct gpiod_lookup_table pch_udc_minnow_vbus_gpio_table = {
	.dev_id = "0000:02:02.4",
	.table = {
		/* sch_gpio.33158 pin 12 — presumably the VBUS sense line;
		 * consumed via pch_vbus_gpio_init() (TODO confirm). */
		GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
		{}
	},
};
2959
/* MinnowBoard platform hook: install the VBUS GPIO lookup table. */
static int pch_udc_minnow_platform_init(struct device *d)
{
	return pch_vbus_gpio_add_table(d, &pch_udc_minnow_vbus_gpio_table);
}
2964
/* Quark X1000 platform hook: its UDC registers live in a different BAR. */
static int pch_udc_quark_platform_init(struct device *d)
{
	struct pch_udc_dev *dev = dev_get_drvdata(d);

	dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
	return 0;
}
2972
/* PCI shutdown hook: silence the controller before reboot/poweroff. */
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	/* Mask every device- and endpoint-level interrupt source. */
	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* Assert soft disconnect so the host drops the device cleanly. */
	pch_udc_set_disconnect(dev);
}
2983
2984 static void pch_udc_remove(struct pci_dev *pdev)
2985 {
2986 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2987
2988 usb_del_gadget_udc(&dev->gadget);
2989
2990
2991 if (dev->driver)
2992 dev_err(&pdev->dev,
2993 "%s: gadget driver still bound!!!\n", __func__);
2994
2995 dma_pool_destroy(dev->data_requests);
2996
2997 if (dev->stp_requests) {
2998
2999 if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3000 dma_pool_free(dev->stp_requests,
3001 dev->ep[UDC_EP0OUT_IDX].td_stp,
3002 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3003 }
3004 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3005 dma_pool_free(dev->stp_requests,
3006 dev->ep[UDC_EP0OUT_IDX].td_data,
3007 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3008 }
3009 dma_pool_destroy(dev->stp_requests);
3010 }
3011
3012 if (dev->dma_addr)
3013 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3014 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3015
3016 pch_vbus_gpio_free(dev);
3017
3018 pch_udc_exit(dev);
3019 }
3020
/* PM suspend: mask all interrupts; hardware state is rebuilt on re-bind. */
static int __maybe_unused pch_udc_suspend(struct device *d)
{
	struct pch_udc_dev *dev = dev_get_drvdata(d);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	return 0;
}
3030
/* PM resume: nothing to restore here; interrupts are re-enabled on start. */
static int __maybe_unused pch_udc_resume(struct device *d)
{
	return 0;
}
3035
/* System sleep PM ops (suspend masks interrupts; resume is a no-op). */
static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);

/* Per-board init hook, stashed in pci_device_id->driver_data. */
typedef int (*platform_init_fn)(struct device *);
3039
3040 static int pch_udc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3041 {
3042 platform_init_fn platform_init = (platform_init_fn)id->driver_data;
3043 int retval;
3044 struct pch_udc_dev *dev;
3045
3046
3047 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3048 if (!dev)
3049 return -ENOMEM;
3050
3051
3052 retval = pcim_enable_device(pdev);
3053 if (retval)
3054 return retval;
3055
3056 dev->bar = PCH_UDC_PCI_BAR;
3057 dev->pdev = pdev;
3058 pci_set_drvdata(pdev, dev);
3059
3060
3061 if (platform_init) {
3062 retval = platform_init(&pdev->dev);
3063 if (retval)
3064 return retval;
3065 }
3066
3067
3068 retval = pcim_iomap_regions(pdev, BIT(dev->bar), pci_name(pdev));
3069 if (retval)
3070 return retval;
3071
3072 dev->base_addr = pcim_iomap_table(pdev)[dev->bar];
3073
3074
3075 retval = pch_udc_pcd_init(dev);
3076 if (retval)
3077 return retval;
3078
3079 pci_enable_msi(pdev);
3080
3081 retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3082 IRQF_SHARED, KBUILD_MODNAME, dev);
3083 if (retval) {
3084 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3085 pdev->irq);
3086 goto finished;
3087 }
3088
3089 pci_set_master(pdev);
3090 pci_try_set_mwi(pdev);
3091
3092
3093 spin_lock_init(&dev->lock);
3094 dev->gadget.ops = &pch_udc_ops;
3095
3096 retval = init_dma_pools(dev);
3097 if (retval)
3098 goto finished;
3099
3100 dev->gadget.name = KBUILD_MODNAME;
3101 dev->gadget.max_speed = USB_SPEED_HIGH;
3102
3103
3104 pch_udc_set_disconnect(dev);
3105 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3106 if (retval)
3107 goto finished;
3108 return 0;
3109
3110 finished:
3111 pch_udc_remove(pdev);
3112 return retval;
3113 }
3114
/* Supported controllers; driver_data carries an optional board init hook. */
static const struct pci_device_id pch_udc_pcidev_id[] = {
	/* Intel Quark X1000 — registers in a non-default BAR */
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
		.driver_data = (kernel_ulong_t)&pch_udc_quark_platform_init,
	},
	/* EG20T on the MinnowBoard — needs the VBUS GPIO lookup table */
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC,
			       PCI_VENDOR_ID_CIRCUITCO, PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
		.driver_data = (kernel_ulong_t)&pch_udc_minnow_platform_init,
	},
	/* Generic Intel EG20T */
	{
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	/* ROHM/LAPIS ML7213 IOH */
	{
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	/* ROHM/LAPIS ML7831 IOH */
	{
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{ 0 },	/* terminator */
};

MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3148
/* PCI driver glue for the PCH/EG20T UDC. */
static struct pci_driver pch_udc_driver = {
	.name =	KBUILD_MODNAME,
	.id_table =	pch_udc_pcidev_id,
	.probe =	pch_udc_probe,
	.remove =	pch_udc_remove,
	.shutdown =	pch_udc_shutdown,
	.driver = {
		.pm = &pch_udc_pm,
	},
};
3159
/* Standard module registration boilerplate. */
module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");