Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Cadence CDNSP DRD Driver.
0004  *
0005  * Copyright (C) 2020 Cadence.
0006  *
0007  * Author: Pawel Laszczak <pawell@cadence.com>
0008  *
0009  */
0010 
0011 #include <linux/moduleparam.h>
0012 #include <linux/dma-mapping.h>
0013 #include <linux/module.h>
0014 #include <linux/iopoll.h>
0015 #include <linux/delay.h>
0016 #include <linux/log2.h>
0017 #include <linux/slab.h>
0018 #include <linux/pci.h>
0019 #include <linux/irq.h>
0020 #include <linux/dmi.h>
0021 
0022 #include "core.h"
0023 #include "gadget-export.h"
0024 #include "drd.h"
0025 #include "cdnsp-gadget.h"
0026 #include "cdnsp-trace.h"
0027 
0028 unsigned int cdnsp_port_speed(unsigned int port_status)
0029 {
0030     /*Detect gadget speed based on PORTSC register*/
0031     if (DEV_SUPERSPEEDPLUS(port_status))
0032         return USB_SPEED_SUPER_PLUS;
0033     else if (DEV_SUPERSPEED(port_status))
0034         return USB_SPEED_SUPER;
0035     else if (DEV_HIGHSPEED(port_status))
0036         return USB_SPEED_HIGH;
0037     else if (DEV_FULLSPEED(port_status))
0038         return USB_SPEED_FULL;
0039 
0040     /* If device is detached then speed will be USB_SPEED_UNKNOWN.*/
0041     return USB_SPEED_UNKNOWN;
0042 }
0043 
0044 /*
0045  * Given a port state, this function returns a value that would result in the
0046  * port being in the same state, if the value was written to the port status
0047  * control register.
0048  * Save Read Only (RO) bits and save read/write bits where
0049  * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
0050  * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
0051  */
0052 u32 cdnsp_port_state_to_neutral(u32 state)
0053 {
0054     /* Save read-only status and port state. */
0055     return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
0056 }
0057 
0058 /**
0059  * cdnsp_find_next_ext_cap - Find the offset of the extended capabilities
0060  *                           with capability ID id.
0061  * @base: PCI MMIO registers base address.
0062  * @start: Address at which to start looking, (0 or HCC_PARAMS to start at
0063  *         beginning of list)
0064  * @id: Extended capability ID to search for.
0065  *
0066  * Returns the offset of the next matching extended capability structure.
0067  * Some capabilities can occur several times,
0068  * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
0069  */
0070 int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
0071 {
0072     u32 offset = start;
0073     u32 next;
0074     u32 val;
0075 
0076     if (!start || start == HCC_PARAMS_OFFSET) {
0077         val = readl(base + HCC_PARAMS_OFFSET);
0078         if (val == ~0)
0079             return 0;
0080 
0081         offset = HCC_EXT_CAPS(val) << 2;
0082         if (!offset)
0083             return 0;
0084     }
0085 
0086     do {
0087         val = readl(base + offset);
0088         if (val == ~0)
0089             return 0;
0090 
0091         if (EXT_CAPS_ID(val) == id && offset != start)
0092             return offset;
0093 
0094         next = EXT_CAPS_NEXT(val);
0095         offset += next << 2;
0096     } while (next);
0097 
0098     return 0;
0099 }
0100 
0101 void cdnsp_set_link_state(struct cdnsp_device *pdev,
0102               __le32 __iomem *port_regs,
0103               u32 link_state)
0104 {
0105     int port_num = 0xFF;
0106     u32 temp;
0107 
0108     temp = readl(port_regs);
0109     temp = cdnsp_port_state_to_neutral(temp);
0110     temp |= PORT_WKCONN_E | PORT_WKDISC_E;
0111     writel(temp, port_regs);
0112 
0113     temp &= ~PORT_PLS_MASK;
0114     temp |= PORT_LINK_STROBE | link_state;
0115 
0116     if (pdev->active_port)
0117         port_num = pdev->active_port->port_num;
0118 
0119     trace_cdnsp_handle_port_status(port_num, readl(port_regs));
0120     writel(temp, port_regs);
0121     trace_cdnsp_link_state_changed(port_num, readl(port_regs));
0122 }
0123 
0124 static void cdnsp_disable_port(struct cdnsp_device *pdev,
0125                    __le32 __iomem *port_regs)
0126 {
0127     u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
0128 
0129     writel(temp | PORT_PED, port_regs);
0130 }
0131 
0132 static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
0133                     __le32 __iomem *port_regs)
0134 {
0135     u32 portsc = readl(port_regs);
0136 
0137     writel(cdnsp_port_state_to_neutral(portsc) |
0138            (portsc & PORT_CHANGE_BITS), port_regs);
0139 }
0140 
0141 static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
0142 {
0143     __le32 __iomem *reg;
0144     void __iomem *base;
0145     u32 offset = 0;
0146 
0147     base = &pdev->cap_regs->hc_capbase;
0148     offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
0149     reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
0150 
0151     bit = readl(reg) | bit;
0152     writel(bit, reg);
0153 }
0154 
0155 static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
0156 {
0157     __le32 __iomem *reg;
0158     void __iomem *base;
0159     u32 offset = 0;
0160 
0161     base = &pdev->cap_regs->hc_capbase;
0162     offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
0163     reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
0164 
0165     bit = readl(reg) & ~bit;
0166     writel(bit, reg);
0167 }
0168 
0169 /*
0170  * Disable interrupts and begin the controller halting process.
0171  */
0172 static void cdnsp_quiesce(struct cdnsp_device *pdev)
0173 {
0174     u32 halted;
0175     u32 mask;
0176     u32 cmd;
0177 
0178     mask = ~(u32)(CDNSP_IRQS);
0179 
0180     halted = readl(&pdev->op_regs->status) & STS_HALT;
0181     if (!halted)
0182         mask &= ~(CMD_R_S | CMD_DEVEN);
0183 
0184     cmd = readl(&pdev->op_regs->command);
0185     cmd &= mask;
0186     writel(cmd, &pdev->op_regs->command);
0187 }
0188 
0189 /*
0190  * Force controller into halt state.
0191  *
0192  * Disable any IRQs and clear the run/stop bit.
0193  * Controller will complete any current and actively pipelined transactions, and
0194  * should halt within 16 ms of the run/stop bit being cleared.
0195  * Read controller Halted bit in the status register to see when the
0196  * controller is finished.
0197  */
0198 int cdnsp_halt(struct cdnsp_device *pdev)
0199 {
0200     int ret;
0201     u32 val;
0202 
0203     cdnsp_quiesce(pdev);
0204 
0205     ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
0206                     val & STS_HALT, 1,
0207                     CDNSP_MAX_HALT_USEC);
0208     if (ret) {
0209         dev_err(pdev->dev, "ERROR: Device halt failed\n");
0210         return ret;
0211     }
0212 
0213     pdev->cdnsp_state |= CDNSP_STATE_HALTED;
0214 
0215     return 0;
0216 }
0217 
0218 /*
0219  * device controller died, register read returns 0xffffffff, or command never
0220  * ends.
0221  */
0222 void cdnsp_died(struct cdnsp_device *pdev)
0223 {
0224     dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
0225     pdev->cdnsp_state |= CDNSP_STATE_DYING;
0226     cdnsp_halt(pdev);
0227 }
0228 
0229 /*
0230  * Set the run bit and wait for the device to be running.
0231  */
0232 static int cdnsp_start(struct cdnsp_device *pdev)
0233 {
0234     u32 temp;
0235     int ret;
0236 
0237     temp = readl(&pdev->op_regs->command);
0238     temp |= (CMD_R_S | CMD_DEVEN);
0239     writel(temp, &pdev->op_regs->command);
0240 
0241     pdev->cdnsp_state = 0;
0242 
0243     /*
0244      * Wait for the STS_HALT Status bit to be 0 to indicate the device is
0245      * running.
0246      */
0247     ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
0248                     !(temp & STS_HALT), 1,
0249                     CDNSP_MAX_HALT_USEC);
0250     if (ret) {
0251         pdev->cdnsp_state = CDNSP_STATE_DYING;
0252         dev_err(pdev->dev, "ERROR: Controller run failed\n");
0253     }
0254 
0255     return ret;
0256 }
0257 
0258 /*
0259  * Reset a halted controller.
0260  *
0261  * This resets pipelines, timers, counters, state machines, etc.
0262  * Transactions will be terminated immediately, and operational registers
0263  * will be set to their defaults.
0264  */
0265 int cdnsp_reset(struct cdnsp_device *pdev)
0266 {
0267     u32 command;
0268     u32 temp;
0269     int ret;
0270 
0271     temp = readl(&pdev->op_regs->status);
0272 
0273     if (temp == ~(u32)0) {
0274         dev_err(pdev->dev, "Device not accessible, reset failed.\n");
0275         return -ENODEV;
0276     }
0277 
0278     if ((temp & STS_HALT) == 0) {
0279         dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
0280         return -EINVAL;
0281     }
0282 
0283     command = readl(&pdev->op_regs->command);
0284     command |= CMD_RESET;
0285     writel(command, &pdev->op_regs->command);
0286 
0287     ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
0288                     !(temp & CMD_RESET), 1,
0289                     10 * 1000);
0290     if (ret) {
0291         dev_err(pdev->dev, "ERROR: Controller reset failed\n");
0292         return ret;
0293     }
0294 
0295     /*
0296      * CDNSP cannot write any doorbells or operational registers other
0297      * than status until the "Controller Not Ready" flag is cleared.
0298      */
0299     ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
0300                     !(temp & STS_CNR), 1,
0301                     10 * 1000);
0302 
0303     if (ret) {
0304         dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
0305         return ret;
0306     }
0307 
0308     dev_dbg(pdev->dev, "Controller ready to work");
0309 
0310     return ret;
0311 }
0312 
/*
 * cdnsp_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
static unsigned int
	cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
{
	unsigned int epnum = usb_endpoint_num(desc);

	/* Control and IN endpoints share the same slot: (epnum * 2). */
	if (usb_endpoint_xfer_control(desc) || usb_endpoint_dir_in(desc))
		return epnum * 2;

	/* OUT endpoints occupy the slot just below. */
	return epnum * 2 - 1;
}
0332 
/*
 * Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int
	cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
{
	unsigned int index = cdnsp_get_endpoint_index(desc);

	/* Bit 0 belongs to the slot context, hence the extra shift by one. */
	return 1 << (index + 1);
}
0343 
0344 int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
0345 {
0346     struct cdnsp_device *pdev = pep->pdev;
0347     struct usb_request *request;
0348     int ret;
0349 
0350     if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
0351         trace_cdnsp_request_enqueue_busy(preq);
0352         return -EBUSY;
0353     }
0354 
0355     request = &preq->request;
0356     request->actual = 0;
0357     request->status = -EINPROGRESS;
0358     preq->direction = pep->direction;
0359     preq->epnum = pep->number;
0360     preq->td.drbl = 0;
0361 
0362     ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
0363     if (ret) {
0364         trace_cdnsp_request_enqueue_error(preq);
0365         return ret;
0366     }
0367 
0368     list_add_tail(&preq->list, &pep->pending_list);
0369 
0370     trace_cdnsp_request_enqueue(preq);
0371 
0372     switch (usb_endpoint_type(pep->endpoint.desc)) {
0373     case USB_ENDPOINT_XFER_CONTROL:
0374         ret = cdnsp_queue_ctrl_tx(pdev, preq);
0375         break;
0376     case USB_ENDPOINT_XFER_BULK:
0377     case USB_ENDPOINT_XFER_INT:
0378         ret = cdnsp_queue_bulk_tx(pdev, preq);
0379         break;
0380     case USB_ENDPOINT_XFER_ISOC:
0381         ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
0382     }
0383 
0384     if (ret)
0385         goto unmap;
0386 
0387     return 0;
0388 
0389 unmap:
0390     usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
0391                     pep->direction);
0392     list_del(&preq->list);
0393     trace_cdnsp_request_enqueue_error(preq);
0394 
0395     return ret;
0396 }
0397 
0398 /*
0399  * Remove the request's TD from the endpoint ring. This may cause the
0400  * controller to stop USB transfers, potentially stopping in the middle of a
0401  * TRB buffer. The controller should pick up where it left off in the TD,
0402  * unless a Set Transfer Ring Dequeue Pointer is issued.
0403  *
0404  * The TRBs that make up the buffers for the canceled request will be "removed"
0405  * from the ring. Since the ring is a contiguous structure, they can't be
0406  * physically removed. Instead, there are two options:
0407  *
0408  *  1) If the controller is in the middle of processing the request to be
0409  *     canceled, we simply move the ring's dequeue pointer past those TRBs
0410  *     using the Set Transfer Ring Dequeue Pointer command. This will be
0411  *     the common case, when drivers timeout on the last submitted request
0412  *     and attempt to cancel.
0413  *
0414  *  2) If the controller is in the middle of a different TD, we turn the TRBs
0415  *     into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
0416  *     The controller will need to invalidate the any TRBs it has cached after
0417  *     the stop endpoint command.
0418  *
0419  *  3) The TD may have completed by the time the Stop Endpoint Command
0420  *     completes, so software needs to handle that case too.
0421  *
0422  */
0423 int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
0424 {
0425     struct cdnsp_device *pdev = pep->pdev;
0426     int ret_stop = 0;
0427     int ret_rem;
0428 
0429     trace_cdnsp_request_dequeue(preq);
0430 
0431     if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
0432         ret_stop = cdnsp_cmd_stop_ep(pdev, pep);
0433 
0434     ret_rem = cdnsp_remove_request(pdev, preq, pep);
0435 
0436     return ret_rem ? ret_rem : ret_stop;
0437 }
0438 
0439 static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
0440 {
0441     struct cdnsp_input_control_ctx *ctrl_ctx;
0442     struct cdnsp_slot_ctx *slot_ctx;
0443     struct cdnsp_ep_ctx *ep_ctx;
0444     int i;
0445 
0446     ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
0447 
0448     /*
0449      * When a device's add flag and drop flag are zero, any subsequent
0450      * configure endpoint command will leave that endpoint's state
0451      * untouched. Make sure we don't leave any old state in the input
0452      * endpoint contexts.
0453      */
0454     ctrl_ctx->drop_flags = 0;
0455     ctrl_ctx->add_flags = 0;
0456     slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
0457     slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
0458 
0459     /* Endpoint 0 is always valid */
0460     slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
0461     for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
0462         ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
0463         ep_ctx->ep_info = 0;
0464         ep_ctx->ep_info2 = 0;
0465         ep_ctx->deq = 0;
0466         ep_ctx->tx_info = 0;
0467     }
0468 }
0469 
0470 /* Issue a configure endpoint command and wait for it to finish. */
0471 static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
0472 {
0473     int ret;
0474 
0475     cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
0476     cdnsp_ring_cmd_db(pdev);
0477     ret = cdnsp_wait_for_cmd_compl(pdev);
0478     if (ret) {
0479         dev_err(pdev->dev,
0480             "ERR: unexpected command completion code 0x%x.\n", ret);
0481         return -EINVAL;
0482     }
0483 
0484     return ret;
0485 }
0486 
/*
 * Mark all pending transfer events for @pep as invalid so the event handler
 * will skip them.
 *
 * Walks the event ring from the current software dequeue pointer and stops
 * at the first TRB not owned by software (cycle-bit mismatch). Matching
 * transfer events get TRB_EVENT_INVALIDATE set in place.
 */
static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
				       struct cdnsp_ep *pep)
{
	struct cdnsp_segment *segment;
	union cdnsp_trb *event;
	u32 cycle_state;
	u32  data;

	event = pdev->event_ring->dequeue;
	segment = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	while (1) {
		data = le32_to_cpu(event->trans_event.flags);

		/* Check the owner of the TRB. */
		if ((data & TRB_CYCLE) != cycle_state)
			break;

		/* EP IDs in transfer events are 1-based, hence idx + 1. */
		if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
		    TRB_TO_EP_ID(data) == (pep->idx + 1)) {
			data |= TRB_EVENT_INVALIDATE;
			event->trans_event.flags = cpu_to_le32(data);
		}

		/*
		 * NOTE(review): this always advances to the segment after the
		 * dequeue segment (not segment->next) and toggles the cycle
		 * state at every segment boundary — that looks correct only
		 * for event rings of at most two segments; confirm the ring
		 * geometry used by this driver.
		 */
		if (cdnsp_last_trb_on_seg(segment, event)) {
			cycle_state ^= 1;
			segment = pdev->event_ring->deq_seg->next;
			event = segment->trbs;
		} else {
			event++;
		}
	}
}
0521 
/*
 * Wait for the most recently queued command to complete.
 *
 * First polls the command ring register until the controller no longer
 * reports the ring busy, then scans the event ring for the completion
 * event that points back at the queued command TRB.
 *
 * Returns 0 on COMP_SUCCESS, -ETIMEDOUT when the command never completes
 * (the controller is then marked dying), -EINVAL on event-ring
 * inconsistencies, or the negated completion code for other failures.
 */
int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *cmd_trb;
	dma_addr_t cmd_deq_dma;
	union cdnsp_trb *event;
	u32 cycle_state;
	int ret, val;
	u64 cmd_dma;
	u32  flags;

	cmd_trb = pdev->cmd.command_trb;
	pdev->cmd.status = 0;

	trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);

	/* Poll until the controller stops processing the command ring. */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
					!CMD_RING_BUSY(val), 1,
					CDNSP_CMD_TIMEOUT);
	if (ret) {
		dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
		trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		return -ETIMEDOUT;
	}

	event = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	/* DMA address of the command TRB, matched against event payloads. */
	cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
	if (!cmd_deq_dma)
		return -EINVAL;

	while (1) {
		flags = le32_to_cpu(event->event_cmd.flags);

		/* Check the owner of the TRB. */
		if ((flags & TRB_CYCLE) != cycle_state)
			return -EINVAL;

		cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);

		/*
		 * Check whether the completion event is for last queued
		 * command.
		 */
		if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
		    cmd_dma != (u64)cmd_deq_dma) {
			/* Not ours: advance, wrapping segments as needed. */
			if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
				event++;
				continue;
			}

			/* The cycle bit flips when the whole ring wraps. */
			if (cdnsp_last_trb_on_ring(pdev->event_ring,
						   event_deq_seg, event))
				cycle_state ^= 1;

			event_deq_seg = event_deq_seg->next;
			event = event_deq_seg->trbs;
			continue;
		}

		trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);

		pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
		if (pdev->cmd.status == COMP_SUCCESS)
			return 0;

		return -pdev->cmd.status;
	}
}
0594 
/*
 * Set or clear the halt (stall) state of an endpoint.
 *
 * @value != 0: stop the endpoint and, once it reports STOPPED, issue a Halt
 * Endpoint command; the endpoint is then flagged EP_HALTED.
 * @value == 0: issue a Reset Endpoint command, clear EP_HALTED and, unless
 * the endpoint is wedged or is EP0, ring its doorbell to resume transfers.
 *
 * Returns 0 on success or a negative error code.
 */
int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
			struct cdnsp_ep *pep,
			int value)
{
	int ret;

	trace_cdnsp_ep_halt(value ? "Set" : "Clear");

	if (value) {
		ret = cdnsp_cmd_stop_ep(pdev, pep);
		if (ret)
			return ret;

		if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
			cdnsp_queue_halt_endpoint(pdev, pep->idx);
			cdnsp_ring_cmd_db(pdev);
			ret = cdnsp_wait_for_cmd_compl(pdev);
		}

		/*
		 * NOTE(review): a failure from the halt command above is
		 * discarded here and 0 is returned — confirm this is
		 * intentional.
		 */
		pep->ep_state |= EP_HALTED;
	} else {
		/*
		 * In device mode driver can call reset endpoint command
		 * from any endpoint state.
		 */
		cdnsp_queue_reset_ep(pdev, pep->idx);
		cdnsp_ring_cmd_db(pdev);
		ret = cdnsp_wait_for_cmd_compl(pdev);
		trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);

		if (ret)
			return ret;

		pep->ep_state &= ~EP_HALTED;

		/* Wedged endpoints stay stalled until EP_WEDGE is cleared. */
		if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
			cdnsp_ring_doorbell_for_active_rings(pdev, pep);

		pep->ep_state &= ~EP_WEDGE;
	}

	return 0;
}
0638 
/*
 * Push the endpoint add/drop flags accumulated in the input context to the
 * controller via a Configure Endpoint command, then clear the input context.
 *
 * Returns 0 when there was nothing to do or the command succeeded,
 * otherwise the error from cdnsp_configure_endpoint().
 */
static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
					  struct cdnsp_ep *pep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int ret = 0;
	u32 ep_sts;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/* Don't issue the command if there's no endpoints to update. */
	if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
		return 0;

	/* The slot context is always added; slot and EP0 are never dropped. */
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
		__le32 le32 = cpu_to_le32(BIT(i));

		/* Highest endpoint still configured (or being added) wins. */
		if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
		    (ctrl_ctx->add_flags & le32) || i == 1) {
			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
			break;
		}
	}

	ep_sts = GET_EP_CTX_STATE(pep->out_ctx);

	/* Only issue the command when the change actually requires it. */
	if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
	     ep_sts == EP_STATE_DISABLED) ||
	    (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
		ret = cdnsp_configure_endpoint(pdev);

	trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
	trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);

	cdnsp_zero_in_ctx(pdev);

	return ret;
}
0685 
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint. The USB core should come back and call
 * cdnsp_setup_device(), and then re-set up the configuration.
 */
int cdnsp_reset_device(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret, i;

	/* Wipe the input slot context and the cached device address. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info = 0;
	pdev->device_address = 0;

	/* If device is not setup, there is no point in resetting it. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
	trace_cdnsp_reset_device(slot_ctx);

	/* Un-halt EP0 so the default control endpoint can run again. */
	if (slot_state <= SLOT_STATE_DEFAULT &&
	    pdev->eps[0].ep_state & EP_HALTED) {
		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
	}

	/*
	 * During Reset Device command controller shall transition the
	 * endpoint ep0 to the Running State.
	 */
	pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
	pdev->eps[0].ep_state |= EP_ENABLED;

	/* Nothing further to reset when still at (or below) Default state. */
	if (slot_state <= SLOT_STATE_DEFAULT)
		return 0;

	cdnsp_queue_reset_device(pdev);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	/*
	 * After Reset Device command all not default endpoints
	 * are in Disabled state.
	 */
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
		pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;

	trace_cdnsp_handle_cmd_reset_dev(slot_ctx);

	if (ret)
		dev_err(pdev->dev, "Reset device failed with error code %d",
			ret);

	return ret;
}
0741 
0742 /*
0743  * Sets the MaxPStreams field and the Linear Stream Array field.
0744  * Sets the dequeue pointer to the stream context array.
0745  */
0746 static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
0747                          struct cdnsp_ep_ctx *ep_ctx,
0748                          struct cdnsp_stream_info *stream_info)
0749 {
0750     u32 max_primary_streams;
0751 
0752     /* MaxPStreams is the number of stream context array entries, not the
0753      * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
0754      * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
0755      */
0756     max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
0757     ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
0758     ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
0759                        | EP_HAS_LSA);
0760     ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
0761 }
0762 
/*
 * The drivers use this function to prepare a bulk endpoints to use streams.
 *
 * Don't allow the call to succeed if endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Returns the number of streams available to the gadget (excluding the
 * reserved stream 0), 0 when no streams were requested, or a negative
 * error code.
 */
int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
	unsigned int num_stream_ctxs;
	int ret;

	if (num_streams ==  0)
		return 0;

	if (num_streams > STREAM_NUM_STREAMS)
		return -EINVAL;

	/*
	 * Add two to the number of streams requested to account for
	 * stream 0 that is reserved for controller usage and one additional
	 * for TASK SET FULL response.
	 */
	num_streams += 2;

	/* The stream context array size must be a power of two */
	num_stream_ctxs = roundup_pow_of_two(num_streams);

	trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);

	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
	if (ret)
		return ret;

	cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);

	/* Mark the endpoint stream-capable and reset stream bookkeeping. */
	pep->ep_state |= EP_HAS_STREAMS;
	pep->stream_info.td_count = 0;
	pep->stream_info.first_prime_det = 0;

	/* Subtract 1 for stream 0, which drivers can't use. */
	return num_streams - 1;
}
0806 
/*
 * Issue a Disable Slot command and tear down the cached slot state.
 *
 * The slot id, active port and both device contexts are cleared regardless
 * of the command outcome; the command completion status is returned.
 */
int cdnsp_disable_slot(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	/* Forget the slot and port even if the command failed. */
	pdev->slot_id = 0;
	pdev->active_port = NULL;

	trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	/* Start from a clean slate for the next enumeration. */
	memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
	memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);

	return ret;
}
0825 
0826 int cdnsp_enable_slot(struct cdnsp_device *pdev)
0827 {
0828     struct cdnsp_slot_ctx *slot_ctx;
0829     int slot_state;
0830     int ret;
0831 
0832     /* If device is not setup, there is no point in resetting it */
0833     slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
0834     slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
0835 
0836     if (slot_state != SLOT_STATE_DISABLED)
0837         return 0;
0838 
0839     cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
0840     cdnsp_ring_cmd_db(pdev);
0841     ret = cdnsp_wait_for_cmd_compl(pdev);
0842     if (ret)
0843         goto show_trace;
0844 
0845     pdev->slot_id = 1;
0846 
0847 show_trace:
0848     trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
0849 
0850     return ret;
0851 }
0852 
/*
 * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY
 * or with BSR = 1 if set_address is SETUP_CONTEXT_ADDRESS.
 */
int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int dev_state = 0;
	int ret;

	/* A slot must have been enabled before the device can be addressed. */
	if (!pdev->slot_id) {
		trace_cdnsp_slot_id("incorrect");
		return -EINVAL;
	}

	if (!pdev->active_port->port_num)
		return -EINVAL;

	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	/* Already in Default state: nothing to do for context-only setup. */
	if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
		trace_cdnsp_slot_already_in_default(slot_ctx);
		return 0;
	}

	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/* First-time setup: populate the input context for addressing. */
	if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
		ret = cdnsp_setup_addressable_priv_dev(pdev);
		if (ret)
			return ret;
	}

	cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);

	/* Only the slot and EP0 contexts take part in this command. */
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	trace_cdnsp_setup_device_slot(slot_ctx);

	cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));

	/* Zero the input context control for later use. */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	return ret;
}
0908 
0909 void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
0910                  struct usb_request *req,
0911                  int enable)
0912 {
0913     if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
0914         return;
0915 
0916     trace_cdnsp_lpm(enable);
0917 
0918     if (enable)
0919         writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
0920                &pdev->active_port->regs->portpmsc);
0921     else
0922         writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
0923 }
0924 
0925 static int cdnsp_get_frame(struct cdnsp_device *pdev)
0926 {
0927     return readl(&pdev->run_regs->microframe_index) >> 3;
0928 }
0929 
/*
 * usb_ep_ops.enable callback: configure and enable an endpoint.
 *
 * Validates the descriptor, computes the transfer interval, initializes the
 * endpoint context/rings and issues a Configure Endpoint command, all under
 * the device spinlock.
 *
 * Returns 0 on success (including when the endpoint was already enabled)
 * or a negative error code.
 */
static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 added_ctxs;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    !desc->wMaxPacketSize)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;
	pep->ep_state &= ~EP_UNCONFIGURED;

	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
			  "%s is already enabled\n", pep->name))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);

	/* Slot and EP0 contexts are managed by the driver, not by gadgets. */
	added_ctxs = cdnsp_get_endpoint_flag(desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
		ret = -EINVAL;
		goto unlock;
	}

	pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	/*
	 * Full-speed interval units differ from high/super speed; scale to
	 * microframes (x8) — assumes bInterval follows the USB 2.0 ch9
	 * encoding for FS endpoints.
	 */
	if (pdev->gadget.speed == USB_SPEED_FULL) {
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
			pep->interval = desc->bInterval << 3;
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
			pep->interval = BIT(desc->bInterval - 1) << 3;
	}

	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
		/* Intervals above 2^12 microframes are rejected. */
		if (pep->interval > BIT(12)) {
			dev_err(pdev->dev, "bInterval %d not supported\n",
				desc->bInterval);
			ret = -EINVAL;
			goto unlock;
		}
		cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	}

	ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
	if (ret)
		goto unlock;

	/* Stage this endpoint as "added" for the configure command. */
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
	ctrl_ctx->drop_flags = 0;

	ret = cdnsp_update_eps_configuration(pdev, pep);
	if (ret) {
		/* Roll back the ring allocation on configuration failure. */
		cdnsp_free_endpoint_rings(pdev, pep);
		goto unlock;
	}

	pep->ep_state |= EP_ENABLED;
	pep->ep_state &= ~EP_STOPPED;

unlock:
	trace_cdnsp_ep_enable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}
1003 
/*
 * Gadget endpoint disable callback (struct usb_ep_ops.disable).
 *
 * Stops and flushes the endpoint on the controller (unless a Reset
 * Device command already unconfigured it), gives back all queued
 * requests, drops the endpoint context and frees its rings.
 */
static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
{
    struct cdnsp_input_control_ctx *ctrl_ctx;
    struct cdnsp_request *preq;
    struct cdnsp_device *pdev;
    struct cdnsp_ep *pep;
    unsigned long flags;
    u32 drop_flag;
    int ret = 0;

    if (!ep)
        return -EINVAL;

    pep = to_cdnsp_ep(ep);
    pdev = pep->pdev;

    spin_lock_irqsave(&pdev->lock, flags);

    if (!(pep->ep_state & EP_ENABLED)) {
        dev_err(pdev->dev, "%s is already disabled\n", pep->name);
        ret = -EINVAL;
        goto finish;
    }

    pep->ep_state |= EP_DIS_IN_RROGRESS;

    /* Endpoint was unconfigured by Reset Device command. */
    if (!(pep->ep_state & EP_UNCONFIGURED)) {
        cdnsp_cmd_stop_ep(pdev, pep);
        cdnsp_cmd_flush_ep(pdev, pep);
    }

    /* Remove all queued USB requests. */
    while (!list_empty(&pep->pending_list)) {
        preq = next_request(&pep->pending_list);
        cdnsp_ep_dequeue(pep, preq);
    }

    /* Invalidate events still pending for this endpoint. */
    cdnsp_invalidate_ep_events(pdev, pep);

    pep->ep_state &= ~EP_DIS_IN_RROGRESS;
    /* Build a drop-endpoint input context for this endpoint. */
    drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
    ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
    ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
    ctrl_ctx->add_flags = 0;

    cdnsp_endpoint_zero(pdev, pep);

    /* No reconfiguration needed if the controller already dropped it. */
    if (!(pep->ep_state & EP_UNCONFIGURED))
        ret = cdnsp_update_eps_configuration(pdev, pep);

    cdnsp_free_endpoint_rings(pdev, pep);

    pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
    pep->ep_state |= EP_STOPPED;

finish:
    trace_cdnsp_ep_disable_end(pep, 0);
    spin_unlock_irqrestore(&pdev->lock, flags);

    return ret;
}
1066 
1067 static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
1068                              gfp_t gfp_flags)
1069 {
1070     struct cdnsp_ep *pep = to_cdnsp_ep(ep);
1071     struct cdnsp_request *preq;
1072 
1073     preq = kzalloc(sizeof(*preq), gfp_flags);
1074     if (!preq)
1075         return NULL;
1076 
1077     preq->epnum = pep->number;
1078     preq->pep = pep;
1079 
1080     trace_cdnsp_alloc_request(preq);
1081 
1082     return &preq->request;
1083 }
1084 
/* Gadget ep free_request callback: release a request allocated above. */
static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
                     struct usb_request *request)
{
    struct cdnsp_request *preq;

    preq = to_cdnsp_request(request);
    trace_cdnsp_free_request(preq);

    kfree(preq);
}
1093 
1094 static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
1095                  struct usb_request *request,
1096                  gfp_t gfp_flags)
1097 {
1098     struct cdnsp_request *preq;
1099     struct cdnsp_device *pdev;
1100     struct cdnsp_ep *pep;
1101     unsigned long flags;
1102     int ret;
1103 
1104     if (!request || !ep)
1105         return -EINVAL;
1106 
1107     pep = to_cdnsp_ep(ep);
1108     pdev = pep->pdev;
1109 
1110     if (!(pep->ep_state & EP_ENABLED)) {
1111         dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
1112             pep->name);
1113         return -EINVAL;
1114     }
1115 
1116     preq = to_cdnsp_request(request);
1117     spin_lock_irqsave(&pdev->lock, flags);
1118     ret = cdnsp_ep_enqueue(pep, preq);
1119     spin_unlock_irqrestore(&pdev->lock, flags);
1120 
1121     return ret;
1122 }
1123 
1124 static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
1125                    struct usb_request *request)
1126 {
1127     struct cdnsp_ep *pep = to_cdnsp_ep(ep);
1128     struct cdnsp_device *pdev = pep->pdev;
1129     unsigned long flags;
1130     int ret;
1131 
1132     if (!pep->endpoint.desc) {
1133         dev_err(pdev->dev,
1134             "%s: can't dequeue to disabled endpoint\n",
1135             pep->name);
1136         return -ESHUTDOWN;
1137     }
1138 
1139     /* Requests has been dequeued during disabling endpoint. */
1140     if (!(pep->ep_state & EP_ENABLED))
1141         return 0;
1142 
1143     spin_lock_irqsave(&pdev->lock, flags);
1144     ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
1145     spin_unlock_irqrestore(&pdev->lock, flags);
1146 
1147     return ret;
1148 }
1149 
1150 static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
1151 {
1152     struct cdnsp_ep *pep = to_cdnsp_ep(ep);
1153     struct cdnsp_device *pdev = pep->pdev;
1154     struct cdnsp_request *preq;
1155     unsigned long flags;
1156     int ret;
1157 
1158     spin_lock_irqsave(&pdev->lock, flags);
1159 
1160     preq = next_request(&pep->pending_list);
1161     if (value) {
1162         if (preq) {
1163             trace_cdnsp_ep_busy_try_halt_again(pep, 0);
1164             ret = -EAGAIN;
1165             goto done;
1166         }
1167     }
1168 
1169     ret = cdnsp_halt_endpoint(pdev, pep, value);
1170 
1171 done:
1172     spin_unlock_irqrestore(&pdev->lock, flags);
1173     return ret;
1174 }
1175 
/* Gadget ep set_wedge callback: mark the endpoint wedged and halt it. */
static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
{
    struct cdnsp_ep *pep = to_cdnsp_ep(ep);
    struct cdnsp_device *pdev = pep->pdev;
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&pdev->lock, flags);
    /* Set the wedge flag before issuing the halt. */
    pep->ep_state |= EP_WEDGE;
    ret = cdnsp_halt_endpoint(pdev, pep, 1);
    spin_unlock_irqrestore(&pdev->lock, flags);

    return ret;
}
1190 
/* Endpoint operations used for the bidirectional control endpoint 0. */
static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
    .enable     = cdnsp_gadget_ep_enable,
    .disable    = cdnsp_gadget_ep_disable,
    .alloc_request  = cdnsp_gadget_ep_alloc_request,
    .free_request   = cdnsp_gadget_ep_free_request,
    .queue      = cdnsp_gadget_ep_queue,
    .dequeue    = cdnsp_gadget_ep_dequeue,
    .set_halt   = cdnsp_gadget_ep_set_halt,
    .set_wedge  = cdnsp_gadget_ep_set_wedge,
};
1201 
/* Endpoint operations used for all non-control endpoints. */
static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
    .enable     = cdnsp_gadget_ep_enable,
    .disable    = cdnsp_gadget_ep_disable,
    .alloc_request  = cdnsp_gadget_ep_alloc_request,
    .free_request   = cdnsp_gadget_ep_free_request,
    .queue      = cdnsp_gadget_ep_queue,
    .dequeue    = cdnsp_gadget_ep_dequeue,
    .set_halt   = cdnsp_gadget_ep_set_halt,
    .set_wedge  = cdnsp_gadget_ep_set_wedge,
};
1212 
/*
 * Complete a request back to its owner.
 *
 * Removes @preq from the endpoint list, records @status (unless the
 * request already has a final status), unmaps its DMA buffer and — for
 * all but the driver-internal ep0 request — invokes the gadget
 * completion handler with pdev->lock dropped around the call.
 * Caller holds pdev->lock.
 */
void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
               struct cdnsp_request *preq,
               int status)
{
    struct cdnsp_device *pdev = pep->pdev;

    list_del(&preq->list);

    if (preq->request.status == -EINPROGRESS)
        preq->request.status = status;

    usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
                    preq->direction);

    trace_cdnsp_request_giveback(preq);

    /* The internal ep0 request never goes back to the gadget driver. */
    if (preq != &pdev->ep0_preq) {
        spin_unlock(&pdev->lock);
        usb_gadget_giveback_request(&pep->endpoint, &preq->request);
        spin_lock(&pdev->lock);
    }
}
1235 
/*
 * Template descriptor for ep0; wMaxPacketSize is filled in at run time
 * (in cdnsp_run() and cdnsp_irq_reset()) once the bus speed is known.
 */
static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
    .bLength =      USB_DT_ENDPOINT_SIZE,
    .bDescriptorType =  USB_DT_ENDPOINT,
    .bmAttributes =     USB_ENDPOINT_XFER_CONTROL,
};
1241 
/*
 * Start the controller at the requested maximum speed: program the
 * interrupt moderation interval, select 3.x port speed support, put
 * both ports into RxDetect and unmask device interrupts.
 * Returns 0 on success or -ENODEV if the controller fails to start.
 */
static int cdnsp_run(struct cdnsp_device *pdev,
             enum usb_device_speed speed)
{
    u32 fs_speed = 0;
    u32 temp;
    int ret;

    /*
     * Interrupt moderation: IMOD_DEFAULT_INTERVAL divided by 250 —
     * presumably ns converted to 250 ns register units; confirm
     * against the register specification.
     */
    temp = readl(&pdev->ir_set->irq_control);
    temp &= ~IMOD_INTERVAL_MASK;
    temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
    writel(temp, &pdev->ir_set->irq_control);

    temp = readl(&pdev->port3x_regs->mode_addr);

    switch (speed) {
    case USB_SPEED_SUPER_PLUS:
        temp |= CFG_3XPORT_SSP_SUPPORT;
        break;
    case USB_SPEED_SUPER:
        temp &= ~CFG_3XPORT_SSP_SUPPORT;
        break;
    case USB_SPEED_HIGH:
        break;
    case USB_SPEED_FULL:
        fs_speed = PORT_REG6_FORCE_FS;
        break;
    default:
        dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
            speed);
        fallthrough;
    case USB_SPEED_UNKNOWN:
        /* Default to superspeed. */
        speed = USB_SPEED_SUPER;
        break;
    }

    /* The USB3 port is only armed when SS/SSP operation is allowed. */
    if (speed >= USB_SPEED_SUPER) {
        writel(temp, &pdev->port3x_regs->mode_addr);
        cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
                     XDEV_RXDETECT);
    } else {
        cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
    }

    cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
                 XDEV_RXDETECT);

    /* Provisional ep0 size; adjusted again in cdnsp_irq_reset(). */
    cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

    writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);

    ret = cdnsp_start(pdev);
    if (ret) {
        ret = -ENODEV;
        goto err;
    }

    /* Enable interrupt generation globally ... */
    temp = readl(&pdev->op_regs->command);
    temp |= (CMD_INTE);
    writel(temp, &pdev->op_regs->command);

    /* ... and unmask the interrupter. */
    temp = readl(&pdev->ir_set->irq_pending);
    writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);

    trace_cdnsp_init("Controller ready to work");
    return 0;
err:
    cdnsp_halt(pdev);
    return ret;
}
1312 
1313 static int cdnsp_gadget_udc_start(struct usb_gadget *g,
1314                   struct usb_gadget_driver *driver)
1315 {
1316     enum usb_device_speed max_speed = driver->max_speed;
1317     struct cdnsp_device *pdev = gadget_to_cdnsp(g);
1318     unsigned long flags;
1319     int ret;
1320 
1321     spin_lock_irqsave(&pdev->lock, flags);
1322     pdev->gadget_driver = driver;
1323 
1324     /* limit speed if necessary */
1325     max_speed = min(driver->max_speed, g->max_speed);
1326     ret = cdnsp_run(pdev, max_speed);
1327 
1328     spin_unlock_irqrestore(&pdev->lock, flags);
1329 
1330     return ret;
1331 }
1332 
1333 /*
1334  * Update Event Ring Dequeue Pointer:
1335  * - When all events have finished
1336  * - To avoid "Event Ring Full Error" condition
1337  */
void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
                   union cdnsp_trb *event_ring_deq,
                   u8 clear_ehb)
{
    u64 temp_64;
    dma_addr_t deq;

    temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);

    /* If necessary, update the HW's version of the event ring deq ptr. */
    if (event_ring_deq != pdev->event_ring->dequeue) {
        deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
                        pdev->event_ring->dequeue);
        /* Keep the flag bits (ERST_PTR_MASK), swap in the new pointer. */
        temp_64 &= ERST_PTR_MASK;
        temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
    }

    /* Clear the event handler busy flag (RW1C). */
    if (clear_ehb)
        temp_64 |= ERST_EHB;
    else
        temp_64 &= ~ERST_EHB;

    cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
}
1363 
/*
 * Reset the command ring to its initial empty state and reprogram its
 * base address into the Command Ring Control register.
 */
static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
{
    struct cdnsp_segment *seg;
    u64 val_64;
    int i;

    cdnsp_initialize_ring_info(pdev->cmd_ring);

    seg = pdev->cmd_ring->first_seg;
    /*
     * Zero all TRBs but the last one of each segment (presumably the
     * link TRB, which must stay intact).
     */
    for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
        memset(seg->trbs, 0,
               sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
        seg = seg->next;
    }

    /* Set the address in the Command Ring Control register. */
    val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
    val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
         (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
         pdev->cmd_ring->cycle_state;
    cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}
1386 
/*
 * Discard all pending events: advance the software dequeue pointer past
 * every TRB whose cycle bit matches the ring's cycle state (i.e. was
 * written by the controller), then publish the new dequeue pointer and
 * clear the event handler busy flag.
 */
static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
{
    struct cdnsp_segment *event_deq_seg;
    union cdnsp_trb *event_ring_deq;
    union cdnsp_trb *event;
    u32 cycle_bit;

    event_ring_deq = pdev->event_ring->dequeue;
    event_deq_seg = pdev->event_ring->deq_seg;
    event = pdev->event_ring->dequeue;

    /* Update ring dequeue pointer. */
    while (1) {
        cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);

        /* Does the controller or driver own the TRB? */
        if (cycle_bit != pdev->event_ring->cycle_state)
            break;

        cdnsp_inc_deq(pdev, pdev->event_ring);

        if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
            event++;
            continue;
        }

        /* Cycle state toggles each time the walk wraps the ring. */
        if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
                       event))
            cycle_bit ^= 1;

        event_deq_seg = event_deq_seg->next;
        event = event_deq_seg->trbs;
    }

    cdnsp_update_erst_dequeue(pdev,  event_ring_deq, 1);
}
1423 
/*
 * Quiesce the controller: flush ep0 and drop the internal ep0 request,
 * disable both ports and the device slot, halt the controller, mask
 * and acknowledge interrupts, discard pending events and reset the
 * command ring. Caller holds pdev->lock.
 */
static void cdnsp_stop(struct cdnsp_device *pdev)
{
    u32 temp;

    cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);

    /* Remove internally queued request for ep0. */
    if (!list_empty(&pdev->eps[0].pending_list)) {
        struct cdnsp_request *req;

        req = next_request(&pdev->eps[0].pending_list);
        if (req == &pdev->ep0_preq)
            cdnsp_ep_dequeue(&pdev->eps[0], req);
    }

    cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
    cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
    cdnsp_disable_slot(pdev);
    cdnsp_halt(pdev);

    /* Acknowledge the event interrupt and mask the interrupter. */
    temp = readl(&pdev->op_regs->status);
    writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
    temp = readl(&pdev->ir_set->irq_pending);
    writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);

    cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
    cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);

    /* Clear interrupt line */
    temp = readl(&pdev->ir_set->irq_pending);
    temp |= IMAN_IP;
    writel(temp, &pdev->ir_set->irq_pending);

    cdnsp_consume_all_events(pdev);
    cdnsp_clear_cmd_ring(pdev);

    trace_cdnsp_exit("Controller stopped.");
}
1462 
1463 /*
1464  * Stop controller.
1465  * This function is called by the gadget core when the driver is removed.
1466  * Disable slot, disable IRQs, and quiesce the controller.
1467  */
static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
{
    struct cdnsp_device *pdev = gadget_to_cdnsp(g);
    unsigned long flags;

    spin_lock_irqsave(&pdev->lock, flags);
    /* Quiesce the controller before dropping the driver reference. */
    cdnsp_stop(pdev);
    pdev->gadget_driver = NULL;
    spin_unlock_irqrestore(&pdev->lock, flags);

    return 0;
}
1480 
/* Gadget get_frame callback: report the current frame number. */
static int cdnsp_gadget_get_frame(struct usb_gadget *g)
{
    return cdnsp_get_frame(gadget_to_cdnsp(g));
}
1487 
/*
 * Initiate remote wakeup on the active port by driving the link back
 * to U0, provided wakeup is allowed: for a sub-SuperSpeed link in U2
 * the host must have set Remote Wake Enable; a link in U3 additionally
 * requires pdev->may_wakeup. Caller holds pdev->lock.
 */
static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
{
    struct cdnsp_port_regs __iomem *port_regs;
    u32 portpm, portsc;

    port_regs = pdev->active_port->regs;
    portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;

    /* Remote wakeup feature is not enabled by host. */
    if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
        portpm = readl(&port_regs->portpmsc);

        if (!(portpm & PORT_RWE))
            return;
    }

    if (portsc == XDEV_U3 && !pdev->may_wakeup)
        return;

    cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);

    /* Flag the pending wakeup for the interrupt path. */
    pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
}
1511 
/* Gadget wakeup callback: trigger remote wakeup under the device lock. */
static int cdnsp_gadget_wakeup(struct usb_gadget *g)
{
    struct cdnsp_device *pdev = gadget_to_cdnsp(g);
    unsigned long flags;

    spin_lock_irqsave(&pdev->lock, flags);
    __cdnsp_gadget_wakeup(pdev);
    spin_unlock_irqrestore(&pdev->lock, flags);

    return 0;
}
1523 
/* Gadget set_selfpowered callback: latch the self-powered status flag. */
static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
                    int is_selfpowered)
{
    struct cdnsp_device *pdev = gadget_to_cdnsp(g);
    unsigned long flags;

    spin_lock_irqsave(&pdev->lock, flags);
    g->is_selfpowered = !!is_selfpowered;
    spin_unlock_irqrestore(&pdev->lock, flags);

    return 0;
}
1536 
/*
 * Gadget pullup callback: soft connect/disconnect from the bus via the
 * core's VBUS set/clear helpers. The device IRQ is disabled around the
 * state change so the interrupt handler cannot run meanwhile.
 */
static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
    struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
    struct cdns *cdns = dev_get_drvdata(pdev->dev);
    unsigned long flags;

    trace_cdnsp_pullup(is_on);

    /*
     * Disable events handling while controller is being
     * enabled/disabled.
     */
    disable_irq(cdns->dev_irq);
    spin_lock_irqsave(&pdev->lock, flags);

    if (!is_on) {
        /* Reset device state before dropping off the bus. */
        cdnsp_reset_device(pdev);
        cdns_clear_vbus(cdns);
    } else {
        cdns_set_vbus(cdns);
    }

    spin_unlock_irqrestore(&pdev->lock, flags);
    enable_irq(cdns->dev_irq);

    return 0;
}
1564 
/* usb_gadget_ops implementation for the CDNSP UDC. */
static const struct usb_gadget_ops cdnsp_gadget_ops = {
    .get_frame      = cdnsp_gadget_get_frame,
    .wakeup         = cdnsp_gadget_wakeup,
    .set_selfpowered    = cdnsp_gadget_set_selfpowered,
    .pullup         = cdnsp_gadget_pullup,
    .udc_start      = cdnsp_gadget_udc_start,
    .udc_stop       = cdnsp_gadget_udc_stop,
};
1573 
/*
 * Read the controller's transfer-buffering depth for @pep from the
 * XBUF extended capability. OUT endpoints use the shared RX tag mask
 * registers; IN endpoints use the per-endpoint TX tag mask register,
 * located by offset arithmetic from the endpoint count.
 */
static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
                   struct cdnsp_ep *pep)
{
    void __iomem *reg = &pdev->cap_regs->hc_capbase;
    int endpoints;

    reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);

    if (!pep->direction) {
        pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
        pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
        /* (value + 1) / 2: encoding per XBUF spec — TODO confirm. */
        pep->buffering = (pep->buffering + 1) / 2;
        pep->buffering_period = (pep->buffering_period + 1) / 2;
        return;
    }

    endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;

    /* Set to XBUF_TX_TAG_MASK_0 register. */
    reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
    /* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. */
    reg += pep->number * sizeof(u32) * 2;

    pep->buffering = (readl(reg) + 1) / 2;
    pep->buffering_period = pep->buffering;
}
1600 
/*
 * Build the driver's endpoint table and register every endpoint the
 * controller instance reports as existing with the gadget core. ep0 is
 * set up as the bidirectional control endpoint; the others advertise
 * bulk/int/isoc capability and stream support.
 */
static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{
    int max_streams = HCC_MAX_PSA(pdev->hcc_params);
    struct cdnsp_ep *pep;
    int i;

    INIT_LIST_HEAD(&pdev->gadget.ep_list);

    if (max_streams < STREAM_LOG_STREAMS) {
        dev_err(pdev->dev, "Stream size %d not supported\n",
            max_streams);
        return -EINVAL;
    }

    /* Driver caps itself at STREAM_LOG_STREAMS regardless of HW max. */
    max_streams = STREAM_LOG_STREAMS;

    for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
        bool direction = !(i & 1); /* Start from OUT endpoint. */
        u8 epnum = ((i + 1) >> 1);

        /* Skip endpoints the controller does not implement. */
        if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
            continue;

        pep = &pdev->eps[i];
        pep->pdev = pdev;
        pep->number = epnum;
        pep->direction = direction; /* 0 for OUT, 1 for IN. */

        /*
         * Ep0 is bidirectional, so ep0in and ep0out are represented by
         * pdev->eps[0]
         */
        if (epnum == 0) {
            snprintf(pep->name, sizeof(pep->name), "ep%d%s",
                 epnum, "BiDir");

            pep->idx = 0;
            usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
            pep->endpoint.maxburst = 1;
            pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
            pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
            pep->endpoint.comp_desc = NULL;
            pep->endpoint.caps.type_control = true;
            pep->endpoint.caps.dir_in = true;
            pep->endpoint.caps.dir_out = true;

            pdev->ep0_preq.epnum = pep->number;
            pdev->ep0_preq.pep = pep;
            pdev->gadget.ep0 = &pep->endpoint;
        } else {
            snprintf(pep->name, sizeof(pep->name), "ep%d%s",
                 epnum, (pep->direction) ? "in" : "out");

            /* Context index: two contexts per ep number, minus ep0. */
            pep->idx =  (epnum * 2 + (direction ? 1 : 0)) - 1;
            usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);

            pep->endpoint.max_streams = max_streams;
            pep->endpoint.ops = &cdnsp_gadget_ep_ops;
            list_add_tail(&pep->endpoint.ep_list,
                      &pdev->gadget.ep_list);

            pep->endpoint.caps.type_iso = true;
            pep->endpoint.caps.type_bulk = true;
            pep->endpoint.caps.type_int = true;

            pep->endpoint.caps.dir_in = direction;
            pep->endpoint.caps.dir_out = !direction;
        }

        pep->endpoint.name = pep->name;
        pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
        pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
        cdnsp_get_ep_buffering(pdev, pep);

        dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
            "CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
            "SupDir IN: %s, OUT: %s\n",
            pep->name, 1024,
            (pep->endpoint.caps.type_control) ? "yes" : "no",
            (pep->endpoint.caps.type_int) ? "yes" : "no",
            (pep->endpoint.caps.type_bulk) ? "yes" : "no",
            (pep->endpoint.caps.type_iso) ? "yes" : "no",
            (pep->endpoint.caps.dir_in) ? "yes" : "no",
            (pep->endpoint.caps.dir_out) ? "yes" : "no");

        INIT_LIST_HEAD(&pep->pending_list);
    }

    return 0;
}
1691 
1692 static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
1693 {
1694     struct cdnsp_ep *pep;
1695     int i;
1696 
1697     for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
1698         pep = &pdev->eps[i];
1699         if (pep->number != 0 && pep->out_ctx)
1700             list_del(&pep->endpoint.ep_list);
1701     }
1702 }
1703 
/*
 * Notify the gadget driver about disconnection and mark the gadget as
 * not attached. pdev->lock is held on entry and dropped around the
 * driver callback; CDNSP_STATE_DISCONNECT_PENDING is set for the
 * duration of that window.
 */
void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
{
    pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;

    if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
        spin_unlock(&pdev->lock);
        pdev->gadget_driver->disconnect(&pdev->gadget);
        spin_lock(&pdev->lock);
    }

    pdev->gadget.speed = USB_SPEED_UNKNOWN;
    usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);

    pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
}
1719 
1720 void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
1721 {
1722     if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
1723         spin_unlock(&pdev->lock);
1724         pdev->gadget_driver->suspend(&pdev->gadget);
1725         spin_lock(&pdev->lock);
1726     }
1727 }
1728 
1729 void cdnsp_resume_gadget(struct cdnsp_device *pdev)
1730 {
1731     if (pdev->gadget_driver && pdev->gadget_driver->resume) {
1732         spin_unlock(&pdev->lock);
1733         pdev->gadget_driver->resume(&pdev->gadget);
1734         spin_lock(&pdev->lock);
1735     }
1736 }
1737 
/*
 * Handle a USB bus reset seen in the interrupt path: reset the device
 * state, read the new connection speed from the active port, notify
 * the gadget core (with pdev->lock dropped around the callback) and
 * program the ep0 max packet size the new speed requires.
 */
void cdnsp_irq_reset(struct cdnsp_device *pdev)
{
    struct cdnsp_port_regs __iomem *port_regs;

    cdnsp_reset_device(pdev);

    port_regs = pdev->active_port->regs;
    pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));

    spin_unlock(&pdev->lock);
    usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
    spin_lock(&pdev->lock);

    switch (pdev->gadget.speed) {
    case USB_SPEED_SUPER_PLUS:
    case USB_SPEED_SUPER:
        cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
        pdev->gadget.ep0->maxpacket = 512;
        break;
    case USB_SPEED_HIGH:
    case USB_SPEED_FULL:
        cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
        pdev->gadget.ep0->maxpacket = 64;
        break;
    default:
        /* Low speed is not supported. */
        dev_err(pdev->dev, "Unknown device speed\n");
        break;
    }

    cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
    cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
    usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
}
1772 
/*
 * Locate the RTL revision extended capability, cache its address in
 * pdev->rev_cap and log the controller revision and buffer sizes.
 */
static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
{
    void __iomem *reg = &pdev->cap_regs->hc_capbase;

    reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
    pdev->rev_cap  = reg;

    dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
         readl(&pdev->rev_cap->ctrl_revision),
         readl(&pdev->rev_cap->rtl_revision),
         readl(&pdev->rev_cap->ep_supported),
         readl(&pdev->rev_cap->rx_buff_size),
         readl(&pdev->rev_cap->tx_buff_size));
}
1787 
/*
 * One-time controller setup: locate the capability/operational/runtime
 * register windows, cache the read-only capability registers, halt and
 * reset the controller, configure the DMA masks and allocate driver
 * memory. Returns 0 on success or a negative errno.
 */
static int cdnsp_gen_setup(struct cdnsp_device *pdev)
{
    int ret;
    u32 reg;

    pdev->cap_regs = pdev->regs;
    pdev->op_regs = pdev->regs +
        HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
    pdev->run_regs = pdev->regs +
        (readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);

    /* Cache read-only capability registers */
    pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
    /* hc_capbase is read first only to extract the HCI version ... */
    pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
    pdev->hci_version = HC_VERSION(pdev->hcc_params);
    /* ... then hcc_params is overwritten with the real register. */
    pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);

    cdnsp_get_rev_cap(pdev);

    /* Make sure the Device Controller is halted. */
    ret = cdnsp_halt(pdev);
    if (ret)
        return ret;

    /* Reset the internal controller memory state and registers. */
    ret = cdnsp_reset(pdev);
    if (ret)
        return ret;

    /*
     * Set dma_mask and coherent_dma_mask to 64-bits,
     * if controller supports 64-bit addressing.
     */
    if (HCC_64BIT_ADDR(pdev->hcc_params) &&
        !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
        dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
        dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
    } else {
        /*
         * This is to avoid error in cases where a 32-bit USB
         * controller is used on a 64-bit capable system.
         */
        ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
        if (ret)
            return ret;

        dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
        dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
    }

    spin_lock_init(&pdev->lock);

    ret = cdnsp_mem_init(pdev);
    if (ret)
        return ret;

    /*
     * Software workaround for U1: after transition
     * to U1 the controller starts gating clock, and in some cases,
     * it causes that controller stack.
     */
    reg = readl(&pdev->port3x_regs->mode_2);
    reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
    writel(reg, &pdev->port3x_regs->mode_2);

    return 0;
}
1855 
/*
 * Allocate and initialize the cdnsp device instance, register the UDC
 * with the gadget core and install the threaded interrupt handler.
 * On failure, unwinds via the goto chain in reverse order of setup.
 */
static int __cdnsp_gadget_init(struct cdns *cdns)
{
    struct cdnsp_device *pdev;
    u32 max_speed;
    int ret = -ENOMEM;

    cdns_drd_gadget_on(cdns);

    pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
    if (!pdev)
        return -ENOMEM;

    pm_runtime_get_sync(cdns->dev);

    cdns->gadget_dev = pdev;
    pdev->dev = cdns->dev;
    pdev->regs = cdns->dev_regs;
    max_speed = usb_get_maximum_speed(cdns->dev);

    switch (max_speed) {
    case USB_SPEED_FULL:
    case USB_SPEED_HIGH:
    case USB_SPEED_SUPER:
    case USB_SPEED_SUPER_PLUS:
        break;
    default:
        dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
        fallthrough;
    case USB_SPEED_UNKNOWN:
        /* Default to SSP */
        max_speed = USB_SPEED_SUPER_PLUS;
        break;
    }

    pdev->gadget.ops = &cdnsp_gadget_ops;
    pdev->gadget.name = "cdnsp-gadget";
    pdev->gadget.speed = USB_SPEED_UNKNOWN;
    pdev->gadget.sg_supported = 1;
    pdev->gadget.max_speed = max_speed;
    pdev->gadget.lpm_capable = 1;

    pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
    if (!pdev->setup_buf)
        goto free_pdev;

    /*
     * Controller supports not aligned buffer but it should improve
     * performance.
     */
    pdev->gadget.quirk_ep_out_aligned_size = true;

    ret = cdnsp_gen_setup(pdev);
    if (ret) {
        dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
        goto free_setup;
    }

    ret = cdnsp_gadget_init_endpoints(pdev);
    if (ret) {
        dev_err(pdev->dev, "failed to initialize endpoints\n");
        goto halt_pdev;
    }

    ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
    if (ret) {
        dev_err(pdev->dev, "failed to register udc\n");
        goto free_endpoints;
    }

    /* IRQ is installed last so it cannot fire on a half-built device. */
    ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
                    cdnsp_irq_handler,
                    cdnsp_thread_irq_handler, IRQF_SHARED,
                    dev_name(pdev->dev), pdev);
    if (ret)
        goto del_gadget;

    return 0;

del_gadget:
    usb_del_gadget_udc(&pdev->gadget);
free_endpoints:
    cdnsp_gadget_free_endpoints(pdev);
halt_pdev:
    cdnsp_halt(pdev);
    cdnsp_reset(pdev);
    cdnsp_mem_cleanup(pdev);
free_setup:
    kfree(pdev->setup_buf);
free_pdev:
    kfree(pdev);

    return ret;
}
1949 
/*
 * cdnsp_gadget_exit - tear down the device (gadget) role.
 * @cdns: core driver instance whose gadget role is being stopped.
 *
 * Reverses __cdnsp_gadget_init(): the IRQ is released first so no handler
 * can run against freed state, the runtime-PM reference taken at init is
 * dropped, the UDC is unregistered, and the DRD block is switched out of
 * gadget mode last.
 */
static void cdnsp_gadget_exit(struct cdns *cdns)
{
    struct cdnsp_device *pdev = cdns->gadget_dev;

    /* Free the IRQ before freeing anything the handler might touch. */
    devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
    /* Drop the reference held since pm_runtime_get_sync() at init. */
    pm_runtime_mark_last_busy(cdns->dev);
    pm_runtime_put_autosuspend(cdns->dev);
    usb_del_gadget_udc(&pdev->gadget);
    cdnsp_gadget_free_endpoints(pdev);
    cdnsp_mem_cleanup(pdev);
    kfree(pdev);
    cdns->gadget_dev = NULL;
    cdns_drd_gadget_off(cdns);
}
1964 
1965 static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
1966 {
1967     struct cdnsp_device *pdev = cdns->gadget_dev;
1968     unsigned long flags;
1969 
1970     if (pdev->link_state == XDEV_U3)
1971         return 0;
1972 
1973     spin_lock_irqsave(&pdev->lock, flags);
1974     cdnsp_disconnect_gadget(pdev);
1975     cdnsp_stop(pdev);
1976     spin_unlock_irqrestore(&pdev->lock, flags);
1977 
1978     return 0;
1979 }
1980 
1981 static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
1982 {
1983     struct cdnsp_device *pdev = cdns->gadget_dev;
1984     enum usb_device_speed max_speed;
1985     unsigned long flags;
1986     int ret;
1987 
1988     if (!pdev->gadget_driver)
1989         return 0;
1990 
1991     spin_lock_irqsave(&pdev->lock, flags);
1992     max_speed = pdev->gadget_driver->max_speed;
1993 
1994     /* Limit speed if necessary. */
1995     max_speed = min(max_speed, pdev->gadget.max_speed);
1996 
1997     ret = cdnsp_run(pdev, max_speed);
1998 
1999     if (pdev->link_state == XDEV_U3)
2000         __cdnsp_gadget_wakeup(pdev);
2001 
2002     spin_unlock_irqrestore(&pdev->lock, flags);
2003 
2004     return ret;
2005 }
2006 
2007 /**
2008  * cdnsp_gadget_init - initialize device structure
2009  * @cdns: cdnsp instance
2010  *
2011  * This function initializes the gadget.
2012  */
2013 int cdnsp_gadget_init(struct cdns *cdns)
2014 {
2015     struct cdns_role_driver *rdrv;
2016 
2017     rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
2018     if (!rdrv)
2019         return -ENOMEM;
2020 
2021     rdrv->start = __cdnsp_gadget_init;
2022     rdrv->stop  = cdnsp_gadget_exit;
2023     rdrv->suspend   = cdnsp_gadget_suspend;
2024     rdrv->resume    = cdnsp_gadget_resume;
2025     rdrv->state = CDNS_ROLE_STATE_INACTIVE;
2026     rdrv->name  = "gadget";
2027     cdns->roles[USB_ROLE_DEVICE] = rdrv;
2028 
2029     return 0;
2030 }