Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
0004  *
0005  * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
0006  * Copyright (C) 2012 Broadcom Corporation
0007  */
0008 
0009 #include <linux/bitops.h>
0010 #include <linux/bug.h>
0011 #include <linux/clk.h>
0012 #include <linux/compiler.h>
0013 #include <linux/debugfs.h>
0014 #include <linux/delay.h>
0015 #include <linux/device.h>
0016 #include <linux/dma-mapping.h>
0017 #include <linux/errno.h>
0018 #include <linux/interrupt.h>
0019 #include <linux/ioport.h>
0020 #include <linux/kernel.h>
0021 #include <linux/list.h>
0022 #include <linux/module.h>
0023 #include <linux/moduleparam.h>
0024 #include <linux/platform_device.h>
0025 #include <linux/sched.h>
0026 #include <linux/seq_file.h>
0027 #include <linux/slab.h>
0028 #include <linux/timer.h>
0029 #include <linux/usb.h>
0030 #include <linux/usb/ch9.h>
0031 #include <linux/usb/gadget.h>
0032 #include <linux/workqueue.h>
0033 
0034 #include <bcm63xx_cpu.h>
0035 #include <bcm63xx_iudma.h>
0036 #include <bcm63xx_dev_usb_usbd.h>
0037 #include <bcm63xx_io.h>
0038 #include <bcm63xx_regs.h>
0039 
#define DRV_MODULE_NAME     "bcm63xx_udc"

/* Canonical name of endpoint zero; referenced from the table below. */
static const char bcm63xx_ep0name[] = "ep0";

/*
 * Static name/capability descriptors for the five endpoints exposed to the
 * gadget layer (see BCM63XX_NUM_EP).  The in/out bulk/int split mirrors the
 * fixed CDC/RNDIS-oriented layout described above iudma_defaults[].
 */
static const struct {
    const char *name;
    const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
    { \
        .name = _name, \
        .caps = _caps, \
    }

    EP_INFO(bcm63xx_ep0name,
        USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
    EP_INFO("ep1in-bulk",
        USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
    EP_INFO("ep2out-bulk",
        USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
    EP_INFO("ep3in-int",
        USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
    EP_INFO("ep4out-int",
        USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
0067 
0068 static bool use_fullspeed;
0069 module_param(use_fullspeed, bool, S_IRUGO);
0070 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
0071 
0072 /*
0073  * RX IRQ coalescing options:
0074  *
0075  * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
0076  * driver is able to pass the "testusb" suite and recover from conditions like:
0077  *
0078  *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
0079  *   2) Host sends 512 bytes of data
0080  *   3) Host decides to reconfigure the device and sends SET_INTERFACE
0081  *   4) Device shuts down the endpoint and cancels the RX transaction
0082  *
0083  * true - one IRQ per transfer, for transfers <= 2048B.  Generates
0084  * considerably fewer IRQs, but error recovery is less robust.  Does not
0085  * reliably pass "testusb".
0086  *
0087  * TX always uses coalescing, because we can cancel partially complete TX
0088  * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
0089  * this on RX.
0090  */
0091 static bool irq_coalesce;
0092 module_param(irq_coalesce, bool, S_IRUGO);
0093 MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
0094 
/* Number of gadget-visible endpoints (size of bcm63xx_ep_info[] / bep[]). */
#define BCM63XX_NUM_EP          5
/* Number of IUDMA channels (size of iudma_defaults[] / iudma[]). */
#define BCM63XX_NUM_IUDMA       6
/* FIFO settings are programmed per RX/TX channel pair. */
#define BCM63XX_NUM_FIFO_PAIRS      3

/* Max usec to busy-wait for a channel to stop in iudma_reset_channel(). */
#define IUDMA_RESET_TIMEOUT_US      10000

/* ep0 is bidirectional and owns two dedicated IUDMA channels. */
#define IUDMA_EP0_RXCHAN        0
#define IUDMA_EP0_TXCHAN        1

/* Largest number of bytes placed in a single buffer descriptor. */
#define IUDMA_MAX_FRAGMENT      2048
#define BCM63XX_MAX_CTRL_PKT        64

/* Endpoint type codes written into the USBD type-map/CSR registers. */
#define BCMEP_CTRL          0x00
#define BCMEP_ISOC          0x01
#define BCMEP_BULK          0x02
#define BCMEP_INTR          0x03

/* Endpoint direction codes for the CSR registers. */
#define BCMEP_OUT           0x00
#define BCMEP_IN            0x01

/* Hardware speed selector values — NOTE(review): not used in this chunk;
 * presumably written to a link-speed register elsewhere in the file. */
#define BCM63XX_SPD_FULL        1
#define BCM63XX_SPD_HIGH        0

/* Offsets of the DMA channel-config and state-RAM blocks within iudma_regs. */
#define IUDMA_DMAC_OFFSET       0x200
#define IUDMA_DMAS_OFFSET       0x400
0120 
/*
 * States of the ep0 control-transfer state machine (stored in
 * bcm63xx_udc::ep0state).  Human-readable names for each value live in
 * bcm63xx_ep0_state_names[] and must be kept in the same order.
 */
enum bcm63xx_ep0_state {
    EP0_REQUEUE,
    EP0_IDLE,
    EP0_IN_DATA_PHASE_SETUP,
    EP0_IN_DATA_PHASE_COMPLETE,
    EP0_OUT_DATA_PHASE_SETUP,
    EP0_OUT_DATA_PHASE_COMPLETE,
    EP0_OUT_STATUS_PHASE,
    EP0_IN_FAKE_STATUS_PHASE,
    EP0_SHUTDOWN,
};
0132 
/*
 * Debug names for enum bcm63xx_ep0_state, indexed by state value.
 * Keep in sync with the enum ordering above.
 */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
    "REQUEUE",
    "IDLE",
    "IN_DATA_PHASE_SETUP",
    "IN_DATA_PHASE_COMPLETE",
    "OUT_DATA_PHASE_SETUP",
    "OUT_DATA_PHASE_COMPLETE",
    "OUT_STATUS_PHASE",
    "IN_FAKE_STATUS_PHASE",
    "SHUTDOWN",
};
0144 
/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.  -1 means "no endpoint" (ep0 RX channel).
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt) — BCMEP_* code.
 * @dir: Direction (in, out) — BCMEP_IN/BCMEP_OUT.
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 *
 * One instance per IUDMA channel; see iudma_defaults[] below.
 */
struct iudma_ch_cfg {
    int             ep_num;
    int             n_bds;
    int             ep_type;
    int             dir;
    int             n_fifo_slots;
    int             max_pkt_hs;
    int             max_pkt_fs;
};
0164 
/* Per-channel configuration, indexed by IUDMA channel number.  Even
 * channels are RX (OUT), odd channels are TX (IN); channels 0/1 serve ep0. */
static const struct iudma_ch_cfg iudma_defaults[] = {

    /* This controller was designed to support a CDC/RNDIS application.
       It may be possible to reconfigure some of the endpoints, but
       the hardware limitations (FIFO sizing and number of DMA channels)
       may significantly impact flexibility and/or stability.  Change
       these values at your own risk.

          ep_num       ep_type           n_fifo_slots    max_pkt_fs
    idx      |  n_bds     |         dir       |  max_pkt_hs  |
     |       |    |       |          |        |      |       |       */
    [0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
    [1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
    [2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
    [3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
    [4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
    [5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};

/* Forward declaration; full definition follows below. */
struct bcm63xx_udc;
0185 
/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
    unsigned int            ch_idx;
    int             ep_num;
    bool                enabled;
    int             max_pkt;
    bool                is_tx;
    struct bcm63xx_ep       *bep;
    struct bcm63xx_udc      *udc;

    /* ring cursors; all three point into bd_ring[] */
    struct bcm_enet_desc        *read_bd;
    struct bcm_enet_desc        *write_bd;
    struct bcm_enet_desc        *end_bd;
    int             n_bds_used;

    /* coherent DMA allocation made in iudma_init_channel() */
    struct bcm_enet_desc        *bd_ring;
    dma_addr_t          bd_ring_dma;
    unsigned int            n_bds;
};
0228 
/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests (struct bcm63xx_req) for
 *         this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
    unsigned int            ep_num;
    struct iudma_ch         *iudma;
    struct usb_ep           ep;
    struct bcm63xx_udc      *udc;
    struct list_head        queue;
    unsigned            halted:1;
};
0246 
/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list (bcm63xx_ep::queue).
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
    struct list_head        queue;      /* ep's requests */
    struct usb_request      req;
    unsigned int            offset;
    unsigned int            bd_bytes;
    struct iudma_ch         *iudma;
};
0262 
/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine (enum bcm63xx_ep0_state).
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 */
struct bcm63xx_udc {
    spinlock_t          lock;

    struct device           *dev;
    struct bcm63xx_usbd_platform_data *pd;
    struct clk          *usbd_clk;
    struct clk          *usbh_clk;

    struct usb_gadget       gadget;
    struct usb_gadget_driver    *driver;

    void __iomem            *usbd_regs;
    void __iomem            *iudma_regs;

    struct bcm63xx_ep       bep[BCM63XX_NUM_EP];
    struct iudma_ch         iudma[BCM63XX_NUM_IUDMA];

    int             cfg;
    int             iface;
    int             alt_iface;

    struct bcm63xx_req      ep0_ctrl_req;
    u8              *ep0_ctrl_buf;

    int             ep0state;
    struct work_struct      ep0_wq;

    unsigned long           wedgemap;

    /* flags requesting work from the ep0 worker */
    unsigned            ep0_req_reset:1;
    unsigned            ep0_req_set_cfg:1;
    unsigned            ep0_req_set_iface:1;
    unsigned            ep0_req_shutdown:1;

    unsigned            ep0_req_completed:1;
    struct usb_request      *ep0_reply;
    struct usb_request      *ep0_request;
};

/* Defined later in this file; needed by the ep setup code above it. */
static const struct usb_ep_ops bcm63xx_udc_ep_ops;
0332 
0333 /***********************************************************************
0334  * Convenience functions
0335  ***********************************************************************/
0336 
/* Map a gadget-layer usb_gadget back to its containing bcm63xx_udc. */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
    return container_of(g, struct bcm63xx_udc, gadget);
}
0341 
/* Map a gadget-layer usb_ep back to its containing bcm63xx_ep. */
static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
    return container_of(ep, struct bcm63xx_ep, ep);
}
0346 
/* Map a gadget-layer usb_request back to its containing bcm63xx_req. */
static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
    return container_of(req, struct bcm63xx_req, req);
}
0351 
/* Read a 32-bit register at offset @off in the USBD block. */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
    return bcm_readl(udc->usbd_regs + off);
}
0356 
/* Write a 32-bit register at offset @off in the USBD block. */
static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
    bcm_writel(val, udc->usbd_regs + off);
}
0361 
/* Read a 32-bit register in the global IUDMA block. */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
    return bcm_readl(udc->iudma_regs + off);
}
0366 
/* Write a 32-bit register in the global IUDMA block. */
static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
    bcm_writel(val, udc->iudma_regs + off);
}
0371 
0372 static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
0373 {
0374     return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
0375             (ENETDMA_CHAN_WIDTH * chan));
0376 }
0377 
0378 static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
0379                     int chan)
0380 {
0381     bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
0382             (ENETDMA_CHAN_WIDTH * chan));
0383 }
0384 
0385 static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
0386 {
0387     return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
0388             (ENETDMA_CHAN_WIDTH * chan));
0389 }
0390 
0391 static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
0392                     int chan)
0393 {
0394     bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
0395             (ENETDMA_CHAN_WIDTH * chan));
0396 }
0397 
/*
 * Gate/ungate both USB clocks together.  The host clock is enabled before
 * the device clock and disabled after it; the udelay presumably lets the
 * clocks settle before register access — TODO confirm against the SoC docs.
 */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
    if (is_enabled) {
        clk_enable(udc->usbh_clk);
        clk_enable(udc->usbd_clk);
        udelay(10);
    } else {
        /* reverse order of the enable path */
        clk_disable(udc->usbd_clk);
        clk_disable(udc->usbh_clk);
    }
}
0409 
0410 /***********************************************************************
0411  * Low-level IUDMA / FIFO operations
0412  ***********************************************************************/
0413 
0414 /**
0415  * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
0416  * @udc: Reference to the device controller.
0417  * @idx: Desired init_sel value.
0418  *
0419  * The "init_sel" signal is used as a selection index for both endpoints
0420  * and IUDMA channels.  Since these do not map 1:1, the use of this signal
0421  * depends on the context.
0422  */
0423 static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
0424 {
0425     u32 val = usbd_readl(udc, USBD_CONTROL_REG);
0426 
0427     val &= ~USBD_CONTROL_INIT_SEL_MASK;
0428     val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
0429     usbd_writel(udc, val, USBD_CONTROL_REG);
0430 }
0431 
0432 /**
0433  * bcm63xx_set_stall - Enable/disable stall on one endpoint.
0434  * @udc: Reference to the device controller.
0435  * @bep: Endpoint on which to operate.
0436  * @is_stalled: true to enable stall, false to disable.
0437  *
0438  * See notes in bcm63xx_update_wedge() regarding automatic clearing of
0439  * halt/stall conditions.
0440  */
0441 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
0442     bool is_stalled)
0443 {
0444     u32 val;
0445 
0446     val = USBD_STALL_UPDATE_MASK |
0447         (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
0448         (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
0449     usbd_writel(udc, val, USBD_STALL_REG);
0450 }
0451 
/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.  FIFO slots are handed out sequentially from 0,
 * RX and TX independently, in the order the pairs appear in
 * iudma_defaults[].
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
    int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
    u32 i, val, rx_fifo_slot, tx_fifo_slot;

    /* set up FIFO boundaries and packet sizes; this is done in pairs */
    rx_fifo_slot = tx_fifo_slot = 0;
    for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
        /* even channel = RX half of the pair, odd channel = TX half */
        const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
        const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

        /* FIFO config registers are indexed via init_sel = pair number */
        bcm63xx_ep_dma_select(udc, i >> 1);

        val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
            ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
             USBD_RXFIFO_CONFIG_END_SHIFT);
        rx_fifo_slot += rx_cfg->n_fifo_slots;
        usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
        usbd_writel(udc,
                is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
                USBD_RXFIFO_EPSIZE_REG);

        val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
            ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
             USBD_TXFIFO_CONFIG_END_SHIFT);
        tx_fifo_slot += tx_cfg->n_fifo_slots;
        usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
        usbd_writel(udc,
                is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
                USBD_TXFIFO_EPSIZE_REG);

        /* dummy read-back, presumably to post the writes — confirm */
        usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
    }
}
0493 
/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 *
 * The FIFO_RESET bit is only ever set here, never cleared by software;
 * presumably it self-clears in hardware — TODO confirm.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
    u32 val;

    bcm63xx_ep_dma_select(udc, ep_num);

    val = usbd_readl(udc, USBD_CONTROL_REG);
    val |= USBD_CONTROL_FIFO_RESET_MASK;
    usbd_writel(udc, val, USBD_CONTROL_REG);
    /* read back, presumably to post the write before returning */
    usbd_readl(udc, USBD_CONTROL_REG);
}
0510 
0511 /**
0512  * bcm63xx_fifo_reset - Flush all hardware FIFOs.
0513  * @udc: Reference to the device controller.
0514  */
0515 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
0516 {
0517     int i;
0518 
0519     for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
0520         bcm63xx_fifo_reset_ep(udc, i);
0521 }
0522 
/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 *
 * Programs the endpoint-number -> DMA-channel-pair mapping and endpoint
 * type for every channel that has a real endpoint (ep0 RX has ep_num == -1
 * and is skipped).
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
    u32 i, val;

    for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
        const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

        if (cfg->ep_num < 0)
            continue;

        bcm63xx_ep_dma_select(udc, cfg->ep_num);
        /* i >> 1: channels are paired, so pair index = DMA channel field */
        val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
            ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
        usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
    }
}
0543 
/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 *
 * Updates every channel's cached max_pkt, then reprograms the per-endpoint
 * CSR for channels that have a real endpoint (ep0 RX, ep_num == -1, only
 * gets the max_pkt update).
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
    u32 val, i;

    usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

    for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
        const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
        /* pick the speed-appropriate packet size from the static table */
        int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
                  cfg->max_pkt_hs : cfg->max_pkt_fs;
        int idx = cfg->ep_num;

        udc->iudma[i].max_pkt = max_pkt;

        if (idx < 0)
            continue;
        usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

        val = (idx << USBD_CSR_EP_LOG_SHIFT) |
              (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
              (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
              (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
              (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
              (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
              (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
        usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
    }
}
0578 
/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
    struct bcm63xx_req *breq)
{
    int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
    unsigned int bytes_left = breq->req.length - breq->offset;
    /* non-coalesced RX: one packet per BD; otherwise up to 2048B per BD */
    const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
        iudma->max_pkt : IUDMA_MAX_FRAGMENT;

    iudma->n_bds_used = 0;
    breq->bd_bytes = 0;
    breq->iudma = iudma;

    /* gadget asked for a trailing ZLP and the payload ends on a packet
     * boundary, so tack on one extra zero-length descriptor */
    if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
        extra_zero_pkt = 1;

    do {
        struct bcm_enet_desc *d = iudma->write_bd;
        u32 dmaflags = 0;
        unsigned int n_bytes;

        /* advance the write cursor, wrapping at the end of the ring */
        if (d == iudma->end_bd) {
            dmaflags |= DMADESC_WRAP_MASK;
            iudma->write_bd = iudma->bd_ring;
        } else {
            iudma->write_bd++;
        }
        iudma->n_bds_used++;

        n_bytes = min_t(int, bytes_left, max_bd_bytes);
        if (n_bytes)
            dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
        else
            /* zero-length packet: length field must be nonzero,
             * the ZERO flag tells the HW to send no data */
            dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
                    DMADESC_USB_ZERO_MASK;

        dmaflags |= DMADESC_OWNER_MASK;
        if (first_bd) {
            dmaflags |= DMADESC_SOP_MASK;
            first_bd = 0;
        }

        /*
         * extra_zero_pkt forces one more iteration through the loop
         * after all data is queued up, to send the zero packet
         */
        if (extra_zero_pkt && !bytes_left)
            extra_zero_pkt = 0;

        /* EOP when: RX (always single-BD), ring full, or final data BD */
        if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
            (n_bytes == bytes_left && !extra_zero_pkt)) {
            last_bd = 1;
            dmaflags |= DMADESC_EOP_MASK;
        }

        d->address = breq->req.dma + breq->offset;
        /* the address must be visible before ownership passes to HW */
        mb();
        d->len_stat = dmaflags;

        breq->offset += n_bytes;
        breq->bd_bytes += n_bytes;
        bytes_left -= n_bytes;
    } while (!last_bd);

    /* kick the channel to start processing the new descriptors */
    usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
            ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
0658 
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.  Returns -EINVAL if called with no BDs
 * outstanding.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
    int i, actual_len = 0;
    struct bcm_enet_desc *d = iudma->read_bd;

    if (!iudma->n_bds_used)
        return -EINVAL;

    for (i = 0; i < iudma->n_bds_used; i++) {
        u32 dmaflags;

        dmaflags = d->len_stat;

        /* hardware still owns this BD: transfer not yet complete */
        if (dmaflags & DMADESC_OWNER_MASK)
            return -EBUSY;

        actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
                  DMADESC_LENGTH_SHIFT;
        /* walk the ring, wrapping at the end */
        if (d == iudma->end_bd)
            d = iudma->bd_ring;
        else
            d++;
    }

    /* all BDs reaped; advance the read cursor and mark the ring empty */
    iudma->read_bd = d;
    iudma->n_bds_used = 0;
    return actual_len;
}
0696 
/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 *
 * Busy-waits up to IUDMA_RESET_TIMEOUT_US for the hardware to drop the
 * channel-enable bit, escalating to a forced BUFHALT at the halfway mark,
 * then clears IRQs, scrubs the BD ring, and reprograms the channel's
 * static settings.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
    int timeout = IUDMA_RESET_TIMEOUT_US;
    struct bcm_enet_desc *d;
    int ch_idx = iudma->ch_idx;

    /* ep0 RX has ep_num == -1; clamp to FIFO 0 in that case */
    if (!iudma->is_tx)
        bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

    /* stop DMA, then wait for the hardware to wrap up */
    usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

    while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
                   ENETDMAC_CHANCFG_EN_MASK) {
        udelay(1);

        /* repeatedly flush the FIFO data until the BD completes */
        if (iudma->is_tx && iudma->ep_num >= 0)
            bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

        if (!timeout--) {
            dev_err(udc->dev, "can't reset IUDMA channel %d\n",
                ch_idx);
            break;
        }
        if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
            dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
                 ch_idx);
            usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
                    ENETDMAC_CHANCFG_REG, ch_idx);
        }
    }
    /* acknowledge any pending channel interrupts */
    usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

    /* don't leave "live" HW-owned entries for the next guy to step on */
    for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
        d->len_stat = 0;
    mb();

    iudma->read_bd = iudma->write_bd = iudma->bd_ring;
    iudma->n_bds_used = 0;

    /* set up IRQs, UBUS burst size, and BD base for this channel */
    usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
            ENETDMAC_IRMASK_REG, ch_idx);
    usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

    usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
    usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
0752 
/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 *
 * Returns 0 on success, -ENOMEM if the BD ring allocation fails.  The BD
 * ring is a managed (devm) coherent allocation, so no matching free is
 * needed on teardown.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
    struct iudma_ch *iudma = &udc->iudma[ch_idx];
    const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
    unsigned int n_bds = cfg->n_bds;
    struct bcm63xx_ep *bep = NULL;

    iudma->ep_num = cfg->ep_num;
    iudma->ch_idx = ch_idx;
    /* odd channels are TX (IN), even channels are RX (OUT) */
    iudma->is_tx = !!(ch_idx & 0x01);
    if (iudma->ep_num >= 0) {
        bep = &udc->bep[iudma->ep_num];
        bep->iudma = iudma;
        INIT_LIST_HEAD(&bep->queue);
    }

    iudma->bep = bep;
    iudma->udc = udc;

    /* ep0 is always active; others are controlled by the gadget driver */
    /* <= 0 covers both ep0 channels: RX (ep_num == -1) and TX (== 0) */
    if (iudma->ep_num <= 0)
        iudma->enabled = true;

    iudma->n_bds = n_bds;
    iudma->bd_ring = dmam_alloc_coherent(udc->dev,
        n_bds * sizeof(struct bcm_enet_desc),
        &iudma->bd_ring_dma, GFP_KERNEL);
    if (!iudma->bd_ring)
        return -ENOMEM;
    iudma->end_bd = &iudma->bd_ring[n_bds - 1];

    return 0;
}
0791 
/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 *
 * Returns 0 on success or the first channel's init error (-ENOMEM).
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
    int i, rc;

    usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

    for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
        rc = iudma_init_channel(udc, i);
        if (rc)
            return rc;
        iudma_reset_channel(udc, &udc->iudma[i]);
    }

    /* unmask IRQs for all BCM63XX_NUM_IUDMA channels at once */
    usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
    return 0;
}
0814 
0815 /**
0816  * iudma_uninit - Uninitialize IUDMA channels.
0817  * @udc: Reference to the device controller.
0818  *
0819  * Kill global IUDMA IRQs, flush channels, and kill DMA.
0820  */
0821 static void iudma_uninit(struct bcm63xx_udc *udc)
0822 {
0823     int i;
0824 
0825     usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
0826 
0827     for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
0828         iudma_reset_channel(udc, &udc->iudma[i]);
0829 
0830     usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
0831 }
0832 
0833 /***********************************************************************
0834  * Other low-level USBD operations
0835  ***********************************************************************/
0836 
/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
    u32 val;

    usbd_writel(udc, 0, USBD_STATUS_REG);

    val = BIT(USBD_EVENT_IRQ_USB_RESET) |
          BIT(USBD_EVENT_IRQ_SETUP) |
          BIT(USBD_EVENT_IRQ_SETCFG) |
          BIT(USBD_EVENT_IRQ_SETINTF) |
          BIT(USBD_EVENT_IRQ_USB_LINK);
    usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
    /* writing val to the status reg presumably acks any pending events
     * (write-1-to-clear) — TODO confirm against the register spec */
    usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
0856 
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
    u32 val, portmask = BIT(udc->pd->port_no);

    if (BCMCPU_IS_6328()) {
        /* configure pinmux to sense VBUS signal */
        val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
        val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
        val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
                   GPIO_PINMUX_OTHR_6328_USB_HOST;
        bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
    }

    val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
    if (is_device) {
        /* HOSTB selects device mode; NODRIV keeps the pullup off
         * until bcm63xx_select_pullup() clears it */
        val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
        val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
    } else {
        val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
        val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
    }
    bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

    /* route the port to the device controller instead of the host */
    val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
    if (is_device)
        val |= USBH_PRIV_SWAP_USBD_MASK;
    else
        val &= ~USBH_PRIV_SWAP_USBD_MASK;
    bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
0898 
0899 /**
0900  * bcm63xx_select_pullup - Enable/disable the pullup on D+
0901  * @udc: Reference to the device controller.
0902  * @is_on: true to enable the pullup, false to disable.
0903  *
0904  * If the pullup is active, the host will sense a FS/HS device connected to
0905  * the port.  If the pullup is inactive, the host will think the USB
0906  * device has been disconnected.
0907  */
0908 static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
0909 {
0910     u32 val, portmask = BIT(udc->pd->port_no);
0911 
0912     val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
0913     if (is_on)
0914         val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
0915     else
0916         val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
0917     bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
0918 }
0919 
0920 /**
0921  * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
0922  * @udc: Reference to the device controller.
0923  *
0924  * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
0925  * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
0926  */
0927 static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
0928 {
0929     set_clocks(udc, true);
0930     iudma_uninit(udc);
0931     set_clocks(udc, false);
0932 
0933     clk_put(udc->usbd_clk);
0934     clk_put(udc->usbh_clk);
0935 }
0936 
0937 /**
0938  * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
0939  * @udc: Reference to the device controller.
0940  */
0941 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
0942 {
0943     int i, rc = 0;
0944     u32 val;
0945 
0946     udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
0947                      GFP_KERNEL);
0948     if (!udc->ep0_ctrl_buf)
0949         return -ENOMEM;
0950 
0951     INIT_LIST_HEAD(&udc->gadget.ep_list);
0952     for (i = 0; i < BCM63XX_NUM_EP; i++) {
0953         struct bcm63xx_ep *bep = &udc->bep[i];
0954 
0955         bep->ep.name = bcm63xx_ep_info[i].name;
0956         bep->ep.caps = bcm63xx_ep_info[i].caps;
0957         bep->ep_num = i;
0958         bep->ep.ops = &bcm63xx_udc_ep_ops;
0959         list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
0960         bep->halted = 0;
0961         usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
0962         bep->udc = udc;
0963         bep->ep.desc = NULL;
0964         INIT_LIST_HEAD(&bep->queue);
0965     }
0966 
0967     udc->gadget.ep0 = &udc->bep[0].ep;
0968     list_del(&udc->bep[0].ep.ep_list);
0969 
0970     udc->gadget.speed = USB_SPEED_UNKNOWN;
0971     udc->ep0state = EP0_SHUTDOWN;
0972 
0973     udc->usbh_clk = clk_get(udc->dev, "usbh");
0974     if (IS_ERR(udc->usbh_clk))
0975         return -EIO;
0976 
0977     udc->usbd_clk = clk_get(udc->dev, "usbd");
0978     if (IS_ERR(udc->usbd_clk)) {
0979         clk_put(udc->usbh_clk);
0980         return -EIO;
0981     }
0982 
0983     set_clocks(udc, true);
0984 
0985     val = USBD_CONTROL_AUTO_CSRS_MASK |
0986           USBD_CONTROL_DONE_CSRS_MASK |
0987           (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
0988     usbd_writel(udc, val, USBD_CONTROL_REG);
0989 
0990     val = USBD_STRAPS_APP_SELF_PWR_MASK |
0991           USBD_STRAPS_APP_RAM_IF_MASK |
0992           USBD_STRAPS_APP_CSRPRGSUP_MASK |
0993           USBD_STRAPS_APP_8BITPHY_MASK |
0994           USBD_STRAPS_APP_RMTWKUP_MASK;
0995 
0996     if (udc->gadget.max_speed == USB_SPEED_HIGH)
0997         val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
0998     else
0999         val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1000     usbd_writel(udc, val, USBD_STRAPS_REG);
1001 
1002     bcm63xx_set_ctrl_irqs(udc, false);
1003 
1004     usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1005 
1006     val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1007           USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1008     usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1009 
1010     rc = iudma_init(udc);
1011     set_clocks(udc, false);
1012     if (rc)
1013         bcm63xx_uninit_udc_hw(udc);
1014 
1015     return 0;
1016 }
1017 
1018 /***********************************************************************
1019  * Standard EP gadget operations
1020  ***********************************************************************/
1021 
1022 /**
1023  * bcm63xx_ep_enable - Enable one endpoint.
1024  * @ep: Endpoint to enable.
1025  * @desc: Contains max packet, direction, etc.
1026  *
1027  * Most of the endpoint parameters are fixed in this controller, so there
1028  * isn't much for this function to do.
1029  */
1030 static int bcm63xx_ep_enable(struct usb_ep *ep,
1031     const struct usb_endpoint_descriptor *desc)
1032 {
1033     struct bcm63xx_ep *bep = our_ep(ep);
1034     struct bcm63xx_udc *udc = bep->udc;
1035     struct iudma_ch *iudma = bep->iudma;
1036     unsigned long flags;
1037 
1038     if (!ep || !desc || ep->name == bcm63xx_ep0name)
1039         return -EINVAL;
1040 
1041     if (!udc->driver)
1042         return -ESHUTDOWN;
1043 
1044     spin_lock_irqsave(&udc->lock, flags);
1045     if (iudma->enabled) {
1046         spin_unlock_irqrestore(&udc->lock, flags);
1047         return -EINVAL;
1048     }
1049 
1050     iudma->enabled = true;
1051     BUG_ON(!list_empty(&bep->queue));
1052 
1053     iudma_reset_channel(udc, iudma);
1054 
1055     bep->halted = 0;
1056     bcm63xx_set_stall(udc, bep, false);
1057     clear_bit(bep->ep_num, &udc->wedgemap);
1058 
1059     ep->desc = desc;
1060     ep->maxpacket = usb_endpoint_maxp(desc);
1061 
1062     spin_unlock_irqrestore(&udc->lock, flags);
1063     return 0;
1064 }
1065 
1066 /**
1067  * bcm63xx_ep_disable - Disable one endpoint.
1068  * @ep: Endpoint to disable.
1069  */
1070 static int bcm63xx_ep_disable(struct usb_ep *ep)
1071 {
1072     struct bcm63xx_ep *bep = our_ep(ep);
1073     struct bcm63xx_udc *udc = bep->udc;
1074     struct iudma_ch *iudma = bep->iudma;
1075     struct bcm63xx_req *breq, *n;
1076     unsigned long flags;
1077 
1078     if (!ep || !ep->desc)
1079         return -EINVAL;
1080 
1081     spin_lock_irqsave(&udc->lock, flags);
1082     if (!iudma->enabled) {
1083         spin_unlock_irqrestore(&udc->lock, flags);
1084         return -EINVAL;
1085     }
1086     iudma->enabled = false;
1087 
1088     iudma_reset_channel(udc, iudma);
1089 
1090     if (!list_empty(&bep->queue)) {
1091         list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1092             usb_gadget_unmap_request(&udc->gadget, &breq->req,
1093                          iudma->is_tx);
1094             list_del(&breq->queue);
1095             breq->req.status = -ESHUTDOWN;
1096 
1097             spin_unlock_irqrestore(&udc->lock, flags);
1098             usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1099             spin_lock_irqsave(&udc->lock, flags);
1100         }
1101     }
1102     ep->desc = NULL;
1103 
1104     spin_unlock_irqrestore(&udc->lock, flags);
1105     return 0;
1106 }
1107 
1108 /**
1109  * bcm63xx_udc_alloc_request - Allocate a new request.
1110  * @ep: Endpoint associated with the request.
1111  * @mem_flags: Flags to pass to kzalloc().
1112  */
1113 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1114     gfp_t mem_flags)
1115 {
1116     struct bcm63xx_req *breq;
1117 
1118     breq = kzalloc(sizeof(*breq), mem_flags);
1119     if (!breq)
1120         return NULL;
1121     return &breq->req;
1122 }
1123 
1124 /**
1125  * bcm63xx_udc_free_request - Free a request.
1126  * @ep: Endpoint associated with the request.
1127  * @req: Request to free.
1128  */
1129 static void bcm63xx_udc_free_request(struct usb_ep *ep,
1130     struct usb_request *req)
1131 {
1132     struct bcm63xx_req *breq = our_req(req);
1133     kfree(breq);
1134 }
1135 
1136 /**
1137  * bcm63xx_udc_queue - Queue up a new request.
1138  * @ep: Endpoint associated with the request.
1139  * @req: Request to add.
1140  * @mem_flags: Unused.
1141  *
1142  * If the queue is empty, start this request immediately.  Otherwise, add
1143  * it to the list.
1144  *
1145  * ep0 replies are sent through this function from the gadget driver, but
1146  * they are treated differently because they need to be handled by the ep0
1147  * state machine.  (Sometimes they are replies to control requests that
1148  * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1149  */
1150 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1151     gfp_t mem_flags)
1152 {
1153     struct bcm63xx_ep *bep = our_ep(ep);
1154     struct bcm63xx_udc *udc = bep->udc;
1155     struct bcm63xx_req *breq = our_req(req);
1156     unsigned long flags;
1157     int rc = 0;
1158 
1159     if (unlikely(!req || !req->complete || !req->buf || !ep))
1160         return -EINVAL;
1161 
1162     req->actual = 0;
1163     req->status = 0;
1164     breq->offset = 0;
1165 
1166     if (bep == &udc->bep[0]) {
1167         /* only one reply per request, please */
1168         if (udc->ep0_reply)
1169             return -EINVAL;
1170 
1171         udc->ep0_reply = req;
1172         schedule_work(&udc->ep0_wq);
1173         return 0;
1174     }
1175 
1176     spin_lock_irqsave(&udc->lock, flags);
1177     if (!bep->iudma->enabled) {
1178         rc = -ESHUTDOWN;
1179         goto out;
1180     }
1181 
1182     rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1183     if (rc == 0) {
1184         list_add_tail(&breq->queue, &bep->queue);
1185         if (list_is_singular(&bep->queue))
1186             iudma_write(udc, bep->iudma, breq);
1187     }
1188 
1189 out:
1190     spin_unlock_irqrestore(&udc->lock, flags);
1191     return rc;
1192 }
1193 
1194 /**
1195  * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1196  * @ep: Endpoint associated with the request.
1197  * @req: Request to remove.
1198  *
1199  * If the request is not at the head of the queue, this is easy - just nuke
1200  * it.  If the request is at the head of the queue, we'll need to stop the
1201  * DMA transaction and then queue up the successor.
1202  */
1203 static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1204 {
1205     struct bcm63xx_ep *bep = our_ep(ep);
1206     struct bcm63xx_udc *udc = bep->udc;
1207     struct bcm63xx_req *breq = our_req(req), *cur;
1208     unsigned long flags;
1209     int rc = 0;
1210 
1211     spin_lock_irqsave(&udc->lock, flags);
1212     if (list_empty(&bep->queue)) {
1213         rc = -EINVAL;
1214         goto out;
1215     }
1216 
1217     cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1218     usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1219 
1220     if (breq == cur) {
1221         iudma_reset_channel(udc, bep->iudma);
1222         list_del(&breq->queue);
1223 
1224         if (!list_empty(&bep->queue)) {
1225             struct bcm63xx_req *next;
1226 
1227             next = list_first_entry(&bep->queue,
1228                 struct bcm63xx_req, queue);
1229             iudma_write(udc, bep->iudma, next);
1230         }
1231     } else {
1232         list_del(&breq->queue);
1233     }
1234 
1235 out:
1236     spin_unlock_irqrestore(&udc->lock, flags);
1237 
1238     req->status = -ESHUTDOWN;
1239     req->complete(ep, req);
1240 
1241     return rc;
1242 }
1243 
1244 /**
1245  * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1246  * @ep: Endpoint to halt.
1247  * @value: Zero to clear halt; nonzero to set halt.
1248  *
1249  * See comments in bcm63xx_update_wedge().
1250  */
1251 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1252 {
1253     struct bcm63xx_ep *bep = our_ep(ep);
1254     struct bcm63xx_udc *udc = bep->udc;
1255     unsigned long flags;
1256 
1257     spin_lock_irqsave(&udc->lock, flags);
1258     bcm63xx_set_stall(udc, bep, !!value);
1259     bep->halted = value;
1260     spin_unlock_irqrestore(&udc->lock, flags);
1261 
1262     return 0;
1263 }
1264 
1265 /**
1266  * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1267  * @ep: Endpoint to wedge.
1268  *
1269  * See comments in bcm63xx_update_wedge().
1270  */
1271 static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1272 {
1273     struct bcm63xx_ep *bep = our_ep(ep);
1274     struct bcm63xx_udc *udc = bep->udc;
1275     unsigned long flags;
1276 
1277     spin_lock_irqsave(&udc->lock, flags);
1278     set_bit(bep->ep_num, &udc->wedgemap);
1279     bcm63xx_set_stall(udc, bep, true);
1280     spin_unlock_irqrestore(&udc->lock, flags);
1281 
1282     return 0;
1283 }
1284 
/* Endpoint operations handed to the gadget core via each ep's ->ops. */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
    .enable     = bcm63xx_ep_enable,
    .disable    = bcm63xx_ep_disable,

    .alloc_request  = bcm63xx_udc_alloc_request,
    .free_request   = bcm63xx_udc_free_request,

    .queue      = bcm63xx_udc_queue,
    .dequeue    = bcm63xx_udc_dequeue,

    .set_halt   = bcm63xx_udc_set_halt,
    .set_wedge  = bcm63xx_udc_set_wedge,
};
1298 
1299 /***********************************************************************
1300  * EP0 handling
1301  ***********************************************************************/
1302 
1303 /**
1304  * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1305  * @udc: Reference to the device controller.
1306  * @ctrl: 8-byte SETUP request.
1307  */
1308 static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1309     struct usb_ctrlrequest *ctrl)
1310 {
1311     int rc;
1312 
1313     spin_unlock_irq(&udc->lock);
1314     rc = udc->driver->setup(&udc->gadget, ctrl);
1315     spin_lock_irq(&udc->lock);
1316     return rc;
1317 }
1318 
1319 /**
1320  * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1321  * @udc: Reference to the device controller.
1322  *
1323  * Many standard requests are handled automatically in the hardware, but
1324  * we still need to pass them to the gadget driver so that it can
1325  * reconfigure the interfaces/endpoints if necessary.
1326  *
1327  * Unfortunately we are not able to send a STALL response if the host
1328  * requests an invalid configuration.  If this happens, we'll have to be
1329  * content with printing a warning.
1330  */
1331 static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1332 {
1333     struct usb_ctrlrequest ctrl;
1334     int rc;
1335 
1336     ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1337     ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1338     ctrl.wValue = cpu_to_le16(udc->cfg);
1339     ctrl.wIndex = 0;
1340     ctrl.wLength = 0;
1341 
1342     rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1343     if (rc < 0) {
1344         dev_warn_ratelimited(udc->dev,
1345             "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1346             udc->cfg);
1347     }
1348     return rc;
1349 }
1350 
1351 /**
1352  * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1353  * @udc: Reference to the device controller.
1354  */
1355 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1356 {
1357     struct usb_ctrlrequest ctrl;
1358     int rc;
1359 
1360     ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1361     ctrl.bRequest = USB_REQ_SET_INTERFACE;
1362     ctrl.wValue = cpu_to_le16(udc->alt_iface);
1363     ctrl.wIndex = cpu_to_le16(udc->iface);
1364     ctrl.wLength = 0;
1365 
1366     rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1367     if (rc < 0) {
1368         dev_warn_ratelimited(udc->dev,
1369             "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1370             udc->iface, udc->alt_iface);
1371     }
1372     return rc;
1373 }
1374 
1375 /**
1376  * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1377  * @udc: Reference to the device controller.
1378  * @ch_idx: IUDMA channel number.
1379  * @req: USB gadget layer representation of the request.
1380  */
1381 static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1382     struct usb_request *req)
1383 {
1384     struct bcm63xx_req *breq = our_req(req);
1385     struct iudma_ch *iudma = &udc->iudma[ch_idx];
1386 
1387     BUG_ON(udc->ep0_request);
1388     udc->ep0_request = req;
1389 
1390     req->actual = 0;
1391     breq->offset = 0;
1392     usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1393     iudma_write(udc, iudma, breq);
1394 }
1395 
1396 /**
1397  * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1398  * @udc: Reference to the device controller.
1399  * @req: USB gadget layer representation of the request.
1400  * @status: Status to return to the gadget driver.
1401  */
1402 static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1403     struct usb_request *req, int status)
1404 {
1405     req->status = status;
1406     if (status)
1407         req->actual = 0;
1408     if (req->complete) {
1409         spin_unlock_irq(&udc->lock);
1410         req->complete(&udc->bep[0].ep, req);
1411         spin_lock_irq(&udc->lock);
1412     }
1413 }
1414 
1415 /**
1416  * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1417  *   reset/shutdown.
1418  * @udc: Reference to the device controller.
1419  * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1420  */
1421 static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1422 {
1423     struct usb_request *req = udc->ep0_reply;
1424 
1425     udc->ep0_reply = NULL;
1426     usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1427     if (udc->ep0_request == req) {
1428         udc->ep0_req_completed = 0;
1429         udc->ep0_request = NULL;
1430     }
1431     bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1432 }
1433 
1434 /**
1435  * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1436  *   transfer len.
1437  * @udc: Reference to the device controller.
1438  */
1439 static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1440 {
1441     struct usb_request *req = udc->ep0_request;
1442 
1443     udc->ep0_req_completed = 0;
1444     udc->ep0_request = NULL;
1445 
1446     return req->actual;
1447 }
1448 
1449 /**
1450  * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1451  * @udc: Reference to the device controller.
1452  * @ch_idx: IUDMA channel number.
1453  * @length: Number of bytes to TX/RX.
1454  *
1455  * Used for simple transfers performed by the ep0 worker.  This will always
1456  * use ep0_ctrl_req / ep0_ctrl_buf.
1457  */
1458 static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1459     int length)
1460 {
1461     struct usb_request *req = &udc->ep0_ctrl_req.req;
1462 
1463     req->buf = udc->ep0_ctrl_buf;
1464     req->length = length;
1465     req->complete = NULL;
1466 
1467     bcm63xx_ep0_map_write(udc, ch_idx, req);
1468 }
1469 
1470 /**
1471  * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1472  * @udc: Reference to the device controller.
1473  *
1474  * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
1475  * for the next packet.  Anything else means the transaction requires multiple
1476  * stages of handling.
1477  */
1478 static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1479 {
1480     int rc;
1481     struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1482 
1483     rc = bcm63xx_ep0_read_complete(udc);
1484 
1485     if (rc < 0) {
1486         dev_err(udc->dev, "missing SETUP packet\n");
1487         return EP0_IDLE;
1488     }
1489 
1490     /*
1491      * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
1492      * ALWAYS deliver these 100% of the time, so if we happen to see one,
1493      * just throw it away.
1494      */
1495     if (rc == 0)
1496         return EP0_REQUEUE;
1497 
1498     /* Drop malformed SETUP packets */
1499     if (rc != sizeof(*ctrl)) {
1500         dev_warn_ratelimited(udc->dev,
1501             "malformed SETUP packet (%d bytes)\n", rc);
1502         return EP0_REQUEUE;
1503     }
1504 
1505     /* Process new SETUP packet arriving on ep0 */
1506     rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1507     if (rc < 0) {
1508         bcm63xx_set_stall(udc, &udc->bep[0], true);
1509         return EP0_REQUEUE;
1510     }
1511 
1512     if (!ctrl->wLength)
1513         return EP0_REQUEUE;
1514     else if (ctrl->bRequestType & USB_DIR_IN)
1515         return EP0_IN_DATA_PHASE_SETUP;
1516     else
1517         return EP0_OUT_DATA_PHASE_SETUP;
1518 }
1519 
1520 /**
1521  * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1522  * @udc: Reference to the device controller.
1523  *
1524  * In state EP0_IDLE, the RX descriptor is either pending, or has been
1525  * filled with a SETUP packet from the host.  This function handles new
1526  * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1527  * and reset/shutdown events.
1528  *
1529  * Returns 0 if work was done; -EAGAIN if nothing to do.
1530  */
1531 static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1532 {
1533     if (udc->ep0_req_reset) {
1534         udc->ep0_req_reset = 0;
1535     } else if (udc->ep0_req_set_cfg) {
1536         udc->ep0_req_set_cfg = 0;
1537         if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1538             udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1539     } else if (udc->ep0_req_set_iface) {
1540         udc->ep0_req_set_iface = 0;
1541         if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1542             udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1543     } else if (udc->ep0_req_completed) {
1544         udc->ep0state = bcm63xx_ep0_do_setup(udc);
1545         return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1546     } else if (udc->ep0_req_shutdown) {
1547         udc->ep0_req_shutdown = 0;
1548         udc->ep0_req_completed = 0;
1549         udc->ep0_request = NULL;
1550         iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1551         usb_gadget_unmap_request(&udc->gadget,
1552             &udc->ep0_ctrl_req.req, 0);
1553 
1554         /* bcm63xx_udc_pullup() is waiting for this */
1555         mb();
1556         udc->ep0state = EP0_SHUTDOWN;
1557     } else if (udc->ep0_reply) {
1558         /*
1559          * This could happen if a USB RESET shows up during an ep0
1560          * transaction (especially if a laggy driver like gadgetfs
1561          * is in use).
1562          */
1563         dev_warn(udc->dev, "nuking unexpected reply\n");
1564         bcm63xx_ep0_nuke_reply(udc, 0);
1565     } else {
1566         return -EAGAIN;
1567     }
1568 
1569     return 0;
1570 }
1571 
1572 /**
1573  * bcm63xx_ep0_one_round - Handle the current ep0 state.
1574  * @udc: Reference to the device controller.
1575  *
1576  * Returns 0 if work was done; -EAGAIN if nothing to do.
1577  */
1578 static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1579 {
1580     enum bcm63xx_ep0_state ep0state = udc->ep0state;
1581     bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1582 
1583     switch (udc->ep0state) {
1584     case EP0_REQUEUE:
1585         /* set up descriptor to receive SETUP packet */
1586         bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1587                          BCM63XX_MAX_CTRL_PKT);
1588         ep0state = EP0_IDLE;
1589         break;
1590     case EP0_IDLE:
1591         return bcm63xx_ep0_do_idle(udc);
1592     case EP0_IN_DATA_PHASE_SETUP:
1593         /*
1594          * Normal case: TX request is in ep0_reply (queued by the
1595          * callback), or will be queued shortly.  When it's here,
1596          * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1597          *
1598          * Shutdown case: Stop waiting for the reply.  Just
1599          * REQUEUE->IDLE.  The gadget driver is NOT expected to
1600          * queue anything else now.
1601          */
1602         if (udc->ep0_reply) {
1603             bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1604                           udc->ep0_reply);
1605             ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1606         } else if (shutdown) {
1607             ep0state = EP0_REQUEUE;
1608         }
1609         break;
1610     case EP0_IN_DATA_PHASE_COMPLETE: {
1611         /*
1612          * Normal case: TX packet (ep0_reply) is in flight; wait for
1613          * it to finish, then go back to REQUEUE->IDLE.
1614          *
1615          * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1616          * completion to the gadget driver, then REQUEUE->IDLE.
1617          */
1618         if (udc->ep0_req_completed) {
1619             udc->ep0_reply = NULL;
1620             bcm63xx_ep0_read_complete(udc);
1621             /*
1622              * the "ack" sometimes gets eaten (see
1623              * bcm63xx_ep0_do_idle)
1624              */
1625             ep0state = EP0_REQUEUE;
1626         } else if (shutdown) {
1627             iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1628             bcm63xx_ep0_nuke_reply(udc, 1);
1629             ep0state = EP0_REQUEUE;
1630         }
1631         break;
1632     }
1633     case EP0_OUT_DATA_PHASE_SETUP:
1634         /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1635         if (udc->ep0_reply) {
1636             bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1637                           udc->ep0_reply);
1638             ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1639         } else if (shutdown) {
1640             ep0state = EP0_REQUEUE;
1641         }
1642         break;
1643     case EP0_OUT_DATA_PHASE_COMPLETE: {
1644         /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1645         if (udc->ep0_req_completed) {
1646             udc->ep0_reply = NULL;
1647             bcm63xx_ep0_read_complete(udc);
1648 
1649             /* send 0-byte ack to host */
1650             bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1651             ep0state = EP0_OUT_STATUS_PHASE;
1652         } else if (shutdown) {
1653             iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1654             bcm63xx_ep0_nuke_reply(udc, 0);
1655             ep0state = EP0_REQUEUE;
1656         }
1657         break;
1658     }
1659     case EP0_OUT_STATUS_PHASE:
1660         /*
1661          * Normal case: 0-byte OUT ack packet is in flight; wait
1662          * for it to finish, then go back to REQUEUE->IDLE.
1663          *
1664          * Shutdown case: just cancel the transmission.  Don't bother
1665          * calling the completion, because it originated from this
1666          * function anyway.  Then go back to REQUEUE->IDLE.
1667          */
1668         if (udc->ep0_req_completed) {
1669             bcm63xx_ep0_read_complete(udc);
1670             ep0state = EP0_REQUEUE;
1671         } else if (shutdown) {
1672             iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1673             udc->ep0_request = NULL;
1674             ep0state = EP0_REQUEUE;
1675         }
1676         break;
1677     case EP0_IN_FAKE_STATUS_PHASE: {
1678         /*
1679          * Normal case: we spoofed a SETUP packet and are now
1680          * waiting for the gadget driver to send a 0-byte reply.
1681          * This doesn't actually get sent to the HW because the
1682          * HW has already sent its own reply.  Once we get the
1683          * response, return to IDLE.
1684          *
1685          * Shutdown case: return to IDLE immediately.
1686          *
1687          * Note that the ep0 RX descriptor has remained queued
1688          * (and possibly unfilled) during this entire transaction.
1689          * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1690          * or SET_INTERFACE transactions.
1691          */
1692         struct usb_request *r = udc->ep0_reply;
1693 
1694         if (!r) {
1695             if (shutdown)
1696                 ep0state = EP0_IDLE;
1697             break;
1698         }
1699 
1700         bcm63xx_ep0_complete(udc, r, 0);
1701         udc->ep0_reply = NULL;
1702         ep0state = EP0_IDLE;
1703         break;
1704     }
1705     case EP0_SHUTDOWN:
1706         break;
1707     }
1708 
1709     if (udc->ep0state == ep0state)
1710         return -EAGAIN;
1711 
1712     udc->ep0state = ep0state;
1713     return 0;
1714 }
1715 
1716 /**
1717  * bcm63xx_ep0_process - ep0 worker thread / state machine.
1718  * @w: Workqueue struct.
1719  *
1720  * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
1721  * is used to synchronize ep0 events and ensure that both HW and SW events
1722  * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
1723  * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1724  * by the USBD hardware.
1725  *
1726  * The worker function will continue iterating around the state machine
1727  * until there is nothing left to do.  Usually "nothing left to do" means
1728  * that we're waiting for a new event from the hardware.
1729  */
1730 static void bcm63xx_ep0_process(struct work_struct *w)
1731 {
1732     struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1733     spin_lock_irq(&udc->lock);
1734     while (bcm63xx_ep0_one_round(udc) == 0)
1735         ;
1736     spin_unlock_irq(&udc->lock);
1737 }
1738 
1739 /***********************************************************************
1740  * Standard UDC gadget operations
1741  ***********************************************************************/
1742 
1743 /**
1744  * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1745  * @gadget: USB device.
1746  */
1747 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1748 {
1749     struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1750 
1751     return (usbd_readl(udc, USBD_STATUS_REG) &
1752         USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1753 }
1754 
1755 /**
1756  * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1757  * @gadget: USB device.
1758  * @is_on: 0 to disable pullup, 1 to enable.
1759  *
1760  * See notes in bcm63xx_select_pullup().
1761  */
1762 static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1763 {
1764     struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1765     unsigned long flags;
1766     int i, rc = -EINVAL;
1767 
1768     spin_lock_irqsave(&udc->lock, flags);
1769     if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1770         udc->gadget.speed = USB_SPEED_UNKNOWN;
1771         udc->ep0state = EP0_REQUEUE;
1772         bcm63xx_fifo_setup(udc);
1773         bcm63xx_fifo_reset(udc);
1774         bcm63xx_ep_setup(udc);
1775 
1776         bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1777         for (i = 0; i < BCM63XX_NUM_EP; i++)
1778             bcm63xx_set_stall(udc, &udc->bep[i], false);
1779 
1780         bcm63xx_set_ctrl_irqs(udc, true);
1781         bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1782         rc = 0;
1783     } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1784         bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1785 
1786         udc->ep0_req_shutdown = 1;
1787         spin_unlock_irqrestore(&udc->lock, flags);
1788 
1789         while (1) {
1790             schedule_work(&udc->ep0_wq);
1791             if (udc->ep0state == EP0_SHUTDOWN)
1792                 break;
1793             msleep(50);
1794         }
1795         bcm63xx_set_ctrl_irqs(udc, false);
1796         cancel_work_sync(&udc->ep0_wq);
1797         return 0;
1798     }
1799 
1800     spin_unlock_irqrestore(&udc->lock, flags);
1801     return rc;
1802 }
1803 
/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 *
 * Returns 0 on success; -EINVAL for an unusable driver (no setup
 * callback, or max_speed below high speed); -ENODEV / -EBUSY as below.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
        struct usb_gadget_driver *driver)
{
    struct bcm63xx_udc *udc = gadget_to_udc(gadget);
    unsigned long flags;

    /* Only high-speed-capable gadget drivers are accepted. */
    if (!driver || driver->max_speed < USB_SPEED_HIGH ||
        !driver->setup)
        return -EINVAL;
    if (!udc)
        return -ENODEV;
    if (udc->driver)
        return -EBUSY;

    spin_lock_irqsave(&udc->lock, flags);

    /* Enable clocks and program FIFO/endpoint hardware before
     * switching the PHY (presumably into device mode — see
     * bcm63xx_select_phy_mode()); the order here is deliberate. */
    set_clocks(udc, true);
    bcm63xx_fifo_setup(udc);
    bcm63xx_ep_init(udc);
    bcm63xx_ep_setup(udc);
    bcm63xx_fifo_reset(udc);
    bcm63xx_select_phy_mode(udc, true);

    udc->driver = driver;
    driver->driver.bus = NULL;
    udc->gadget.dev.of_node = udc->dev->of_node;

    spin_unlock_irqrestore(&udc->lock, flags);

    return 0;
}
1840 
1841 /**
1842  * bcm63xx_udc_stop - Shut down the controller.
1843  * @gadget: USB device.
1844  * @driver: Driver for USB device.
1845  */
1846 static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1847 {
1848     struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1849     unsigned long flags;
1850 
1851     spin_lock_irqsave(&udc->lock, flags);
1852 
1853     udc->driver = NULL;
1854 
1855     /*
1856      * If we switch the PHY too abruptly after dropping D+, the host
1857      * will often complain:
1858      *
1859      *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1860      */
1861     msleep(100);
1862 
1863     bcm63xx_select_phy_mode(udc, false);
1864     set_clocks(udc, false);
1865 
1866     spin_unlock_irqrestore(&udc->lock, flags);
1867 
1868     return 0;
1869 }
1870 
/* Gadget operations handed to the UDC core via usb_add_gadget_udc(). */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
    .get_frame  = bcm63xx_udc_get_frame,
    .pullup     = bcm63xx_udc_pullup,
    .udc_start  = bcm63xx_udc_start,
    .udc_stop   = bcm63xx_udc_stop,
};
1877 
1878 /***********************************************************************
1879  * IRQ handling
1880  ***********************************************************************/
1881 
1882 /**
1883  * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1884  * @udc: Reference to the device controller.
1885  *
1886  * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1887  * The driver never sees the raw control packets coming in on the ep0
1888  * IUDMA channel, but at least we get an interrupt event to tell us that
1889  * new values are waiting in the USBD_STATUS register.
1890  */
1891 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1892 {
1893     u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1894 
1895     udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1896     udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1897     udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1898              USBD_STATUS_ALTINTF_SHIFT;
1899     bcm63xx_ep_setup(udc);
1900 }
1901 
1902 /**
1903  * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1904  * @udc: Reference to the device controller.
1905  *
1906  * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
1907  * speed has changed, so that the caller can update the endpoint settings.
1908  */
1909 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1910 {
1911     u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1912     enum usb_device_speed oldspeed = udc->gadget.speed;
1913 
1914     switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1915     case BCM63XX_SPD_HIGH:
1916         udc->gadget.speed = USB_SPEED_HIGH;
1917         break;
1918     case BCM63XX_SPD_FULL:
1919         udc->gadget.speed = USB_SPEED_FULL;
1920         break;
1921     default:
1922         /* this should never happen */
1923         udc->gadget.speed = USB_SPEED_UNKNOWN;
1924         dev_err(udc->dev,
1925             "received SETUP packet with invalid link speed\n");
1926         return 0;
1927     }
1928 
1929     if (udc->gadget.speed != oldspeed) {
1930         dev_info(udc->dev, "link up, %s-speed mode\n",
1931              udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1932         return 1;
1933     } else {
1934         return 0;
1935     }
1936 }
1937 
1938 /**
1939  * bcm63xx_update_wedge - Iterate through wedged endpoints.
1940  * @udc: Reference to the device controller.
1941  * @new_status: true to "refresh" wedge status; false to clear it.
1942  *
1943  * On a SETUP interrupt, we need to manually "refresh" the wedge status
1944  * because the controller hardware is designed to automatically clear
1945  * stalls in response to a CLEAR_FEATURE request from the host.
1946  *
1947  * On a RESET interrupt, we do want to restore all wedged endpoints.
1948  */
1949 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1950 {
1951     int i;
1952 
1953     for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1954         bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1955         if (!new_status)
1956             clear_bit(i, &udc->wedgemap);
1957     }
1958 }
1959 
/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
    struct bcm63xx_udc *udc = dev_id;
    u32 stat;
    bool disconnected = false, bus_reset = false;

    /* Only act on events that are both pending and unmasked. */
    stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
           usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

    /* Ack the captured events by writing the status back. */
    usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

    spin_lock(&udc->lock);
    if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
        /* VBUS toggled */

        /* Print "link down" only when the link bit is now clear and
         * we previously had an established speed. */
        if (!(usbd_readl(udc, USBD_EVENTS_REG) &
              USBD_EVENTS_USB_LINK_MASK) &&
              udc->gadget.speed != USB_SPEED_UNKNOWN)
            dev_info(udc->dev, "link down\n");

        udc->gadget.speed = USB_SPEED_UNKNOWN;
        disconnected = true;
    }
    if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
        /* Host-initiated bus reset: reprogram FIFOs/endpoints and
         * drop all stall + wedge state (see bcm63xx_update_wedge). */
        bcm63xx_fifo_setup(udc);
        bcm63xx_fifo_reset(udc);
        bcm63xx_ep_setup(udc);

        bcm63xx_update_wedge(udc, false);

        /* Defer gadget-visible reset processing to the ep0 worker. */
        udc->ep0_req_reset = 1;
        schedule_work(&udc->ep0_wq);
        bus_reset = true;
    }
    if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
        /* A SETUP event may coincide with a link speed change; the
         * FIFO/endpoint geometry must then be recomputed. */
        if (bcm63xx_update_link_speed(udc)) {
            bcm63xx_fifo_setup(udc);
            bcm63xx_ep_setup(udc);
        }
        /* HW auto-clears stalls on CLEAR_FEATURE; re-assert stalls
         * on endpoints the gadget driver has wedged. */
        bcm63xx_update_wedge(udc, true);
    }
    if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
        bcm63xx_update_cfg_iface(udc);
        udc->ep0_req_set_cfg = 1;
        schedule_work(&udc->ep0_wq);
    }
    if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
        bcm63xx_update_cfg_iface(udc);
        udc->ep0_req_set_iface = 1;
        schedule_work(&udc->ep0_wq);
    }
    spin_unlock(&udc->lock);

    /* Notify the gadget driver with the lock released. */
    if (disconnected && udc->driver)
        udc->driver->disconnect(&udc->gadget);
    else if (bus_reset && udc->driver)
        usb_gadget_udc_reset(&udc->gadget, udc->driver);

    return IRQ_HANDLED;
}
2028 
/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread.  For normal bulk/intr channels, either queue up
 * the next buffer descriptor for the transaction (incomplete transaction),
 * or invoke the completion callback (complete transactions).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
    struct iudma_ch *iudma = dev_id;
    struct bcm63xx_udc *udc = iudma->udc;
    struct bcm63xx_ep *bep;
    struct usb_request *req = NULL;
    struct bcm63xx_req *breq = NULL;
    int rc;
    bool is_done = false;

    spin_lock(&udc->lock);

    /* Ack the BUFDONE interrupt for this channel. */
    usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
            ENETDMAC_IR_REG, iudma->ch_idx);
    bep = iudma->bep;
    /* rc: presumably bytes transferred, or negative on error —
     * see iudma_read() for the exact contract. */
    rc = iudma_read(udc, iudma);

    /* special handling for EP0 RX (0) and TX (1) */
    if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
        iudma->ch_idx == IUDMA_EP0_TXCHAN) {
        req = udc->ep0_request;
        breq = our_req(req);

        /* a single request could require multiple submissions */
        if (rc >= 0) {
            req->actual += rc;

            /* Finished when the full length has arrived, or the
             * transfer came up short (breq->bd_bytes > rc). */
            if (req->actual >= req->length || breq->bd_bytes > rc) {
                udc->ep0_req_completed = 1;
                is_done = true;
                schedule_work(&udc->ep0_wq);

                /* "actual" on a ZLP is 1 byte */
                req->actual = min(req->actual, req->length);
            } else {
                /* queue up the next BD (same request) */
                iudma_write(udc, iudma, breq);
            }
        }
    } else if (!list_empty(&bep->queue)) {
        /* Normal bulk/interrupt endpoint: work on the head request. */
        breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
        req = &breq->req;

        if (rc >= 0) {
            req->actual += rc;

            if (req->actual >= req->length || breq->bd_bytes > rc) {
                is_done = true;
                list_del(&breq->queue);

                req->actual = min(req->actual, req->length);

                /* Kick off the next queued request, if any. */
                if (!list_empty(&bep->queue)) {
                    struct bcm63xx_req *next;

                    next = list_first_entry(&bep->queue,
                        struct bcm63xx_req, queue);
                    iudma_write(udc, iudma, next);
                }
            } else {
                /* Same request, next BD. */
                iudma_write(udc, iudma, breq);
            }
        }
    }
    spin_unlock(&udc->lock);

    /* Run the completion callback with the lock released. */
    if (is_done) {
        usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
        if (req->complete)
            req->complete(&bep->ep, req);
    }

    return IRQ_HANDLED;
}
2113 
2114 /***********************************************************************
2115  * Debug filesystem
2116  ***********************************************************************/
2117 
2118 /*
2119  * bcm63xx_usbd_dbg_show - Show USBD controller state.
2120  * @s: seq_file to which the information will be written.
2121  * @p: Unused.
2122  *
2123  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2124  */
2125 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2126 {
2127     struct bcm63xx_udc *udc = s->private;
2128 
2129     if (!udc->driver)
2130         return -ENODEV;
2131 
2132     seq_printf(s, "ep0 state: %s\n",
2133            bcm63xx_ep0_state_names[udc->ep0state]);
2134     seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
2135            udc->ep0_req_reset ? "reset " : "",
2136            udc->ep0_req_set_cfg ? "set_cfg " : "",
2137            udc->ep0_req_set_iface ? "set_iface " : "",
2138            udc->ep0_req_shutdown ? "shutdown " : "",
2139            udc->ep0_request ? "pending " : "",
2140            udc->ep0_req_completed ? "completed " : "",
2141            udc->ep0_reply ? "reply " : "");
2142     seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2143            udc->cfg, udc->iface, udc->alt_iface);
2144     seq_printf(s, "regs:\n");
2145     seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
2146            usbd_readl(udc, USBD_CONTROL_REG),
2147            usbd_readl(udc, USBD_STRAPS_REG),
2148            usbd_readl(udc, USBD_STATUS_REG));
2149     seq_printf(s, "  events:  %08x; stall:  %08x\n",
2150            usbd_readl(udc, USBD_EVENTS_REG),
2151            usbd_readl(udc, USBD_STALL_REG));
2152 
2153     return 0;
2154 }
2155 DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
2156 
2157 /*
2158  * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2159  * @s: seq_file to which the information will be written.
2160  * @p: Unused.
2161  *
2162  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2163  */
2164 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2165 {
2166     struct bcm63xx_udc *udc = s->private;
2167     int ch_idx, i;
2168     u32 sram2, sram3;
2169 
2170     if (!udc->driver)
2171         return -ENODEV;
2172 
2173     for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2174         struct iudma_ch *iudma = &udc->iudma[ch_idx];
2175         struct list_head *pos;
2176 
2177         seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2178         switch (iudma_defaults[ch_idx].ep_type) {
2179         case BCMEP_CTRL:
2180             seq_printf(s, "control");
2181             break;
2182         case BCMEP_BULK:
2183             seq_printf(s, "bulk");
2184             break;
2185         case BCMEP_INTR:
2186             seq_printf(s, "interrupt");
2187             break;
2188         }
2189         seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2190         seq_printf(s, " [ep%d]:\n",
2191                max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2192         seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2193                usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2194                usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2195                usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2196                usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2197 
2198         sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2199         sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2200         seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2201                usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2202                sram2 >> 16, sram2 & 0xffff,
2203                sram3 >> 16, sram3 & 0xffff,
2204                usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2205         seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
2206                iudma->n_bds);
2207 
2208         if (iudma->bep) {
2209             i = 0;
2210             list_for_each(pos, &iudma->bep->queue)
2211                 i++;
2212             seq_printf(s, "; %d queued\n", i);
2213         } else {
2214             seq_printf(s, "\n");
2215         }
2216 
2217         for (i = 0; i < iudma->n_bds; i++) {
2218             struct bcm_enet_desc *d = &iudma->bd_ring[i];
2219 
2220             seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
2221                    i * sizeof(*d), i,
2222                    d->len_stat >> 16, d->len_stat & 0xffff,
2223                    d->address);
2224             if (d == iudma->read_bd)
2225                 seq_printf(s, "   <<RD");
2226             if (d == iudma->write_bd)
2227                 seq_printf(s, "   <<WR");
2228             seq_printf(s, "\n");
2229         }
2230 
2231         seq_printf(s, "\n");
2232     }
2233 
2234     return 0;
2235 }
2236 DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
2237 
/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 *
 * Creates /sys/kernel/debug/<gadget name>/{usbd,iudma}.  Return values
 * of the debugfs_create_* calls are deliberately ignored, per debugfs
 * API convention.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
    struct dentry *root;

    if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
        return;

    root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
    debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
    debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
}
2253 
2254 /**
2255  * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2256  * @udc: Reference to the device controller.
2257  *
2258  * debugfs_remove() is safe to call with a NULL argument.
2259  */
2260 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2261 {
2262     debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
2263 }
2264 
2265 /***********************************************************************
2266  * Driver init/exit
2267  ***********************************************************************/
2268 
/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
    struct bcm63xx_udc *udc;
    int rc = -ENOMEM, i, irq;

    udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
    if (!udc)
        return -ENOMEM;

    platform_set_drvdata(pdev, udc);
    udc->dev = dev;
    udc->pd = pd;

    if (!pd) {
        dev_err(dev, "missing platform data\n");
        return -EINVAL;
    }

    /* MEM resource #0: USBD control registers; #1: IUDMA registers. */
    udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(udc->usbd_regs))
        return PTR_ERR(udc->usbd_regs);

    udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
    if (IS_ERR(udc->iudma_regs))
        return PTR_ERR(udc->iudma_regs);

    spin_lock_init(&udc->lock);
    INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

    udc->gadget.ops = &bcm63xx_udc_ops;
    udc->gadget.name = dev_name(dev);

    /* Full speed can be forced by platform data or module parameter. */
    if (!pd->use_fullspeed && !use_fullspeed)
        udc->gadget.max_speed = USB_SPEED_HIGH;
    else
        udc->gadget.max_speed = USB_SPEED_FULL;

    /* request clocks, allocate buffers, and clear any pending IRQs */
    rc = bcm63xx_init_udc_hw(udc);
    if (rc)
        return rc;

    rc = -ENXIO;

    /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        rc = irq;
        goto out_uninit;
    }
    if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
                 dev_name(dev), udc) < 0)
        goto report_request_failure;

    /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
    for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
        irq = platform_get_irq(pdev, i + 1);
        if (irq < 0) {
            rc = irq;
            goto out_uninit;
        }
        if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
                     dev_name(dev), &udc->iudma[i]) < 0)
            goto report_request_failure;
    }

    bcm63xx_udc_init_debugfs(udc);
    rc = usb_add_gadget_udc(dev, &udc->gadget);
    if (!rc)
        return 0;

    /* usb_add_gadget_udc() failed: tear down in reverse order; IRQs
     * and allocations are devm-managed, so only HW/debugfs need
     * explicit cleanup. */
    bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
    bcm63xx_uninit_udc_hw(udc);
    return rc;

report_request_failure:
    dev_err(dev, "error requesting IRQ #%d\n", irq);
    goto out_uninit;
}
2358 
/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
    struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

    bcm63xx_udc_cleanup_debugfs(udc);
    usb_del_gadget_udc(&udc->gadget);
    /* Unregistering must have unbound any gadget driver. */
    BUG_ON(udc->driver);

    bcm63xx_uninit_udc_hw(udc);

    return 0;
}
2375 
/* Platform driver glue; binds to devices registered by the BSP code. */
static struct platform_driver bcm63xx_udc_driver = {
    .probe      = bcm63xx_udc_probe,
    .remove     = bcm63xx_udc_remove,
    .driver     = {
        .name   = DRV_MODULE_NAME,
    },
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);