0001 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
0002 /*
0003  * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
0004  *
0005  * Copyright (C) 2004-2013 Synopsys, Inc.
0006  *
0007  * Redistribution and use in source and binary forms, with or without
0008  * modification, are permitted provided that the following conditions
0009  * are met:
0010  * 1. Redistributions of source code must retain the above copyright
0011  *    notice, this list of conditions, and the following disclaimer,
0012  *    without modification.
0013  * 2. Redistributions in binary form must reproduce the above copyright
0014  *    notice, this list of conditions and the following disclaimer in the
0015  *    documentation and/or other materials provided with the distribution.
0016  * 3. The names of the above-listed copyright holders may not be used
0017  *    to endorse or promote products derived from this software without
0018  *    specific prior written permission.
0019  *
0020  * ALTERNATIVELY, this software may be distributed under the terms of the
0021  * GNU General Public License ("GPL") as published by the Free Software
0022  * Foundation; either version 2 of the License, or (at your option) any
0023  * later version.
0024  *
0025  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
0026  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
0027  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
0028  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
0029  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
0030  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
0031  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
0032  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
0033  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
0034  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
0035  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0036  */
0037 
0038 /*
0039  * This file contains the interrupt handlers for Host mode
0040  */
0041 #include <linux/kernel.h>
0042 #include <linux/module.h>
0043 #include <linux/spinlock.h>
0044 #include <linux/interrupt.h>
0045 #include <linux/dma-mapping.h>
0046 #include <linux/io.h>
0047 #include <linux/slab.h>
0048 #include <linux/usb.h>
0049 
0050 #include <linux/usb/hcd.h>
0051 #include <linux/usb/ch11.h>
0052 
0053 #include "core.h"
0054 #include "hcd.h"
0055 
0056 /*
0057  * If we get this many NAKs on a split transaction we'll slow down
0058  * retransmission.  A 1 here means delay after the first NAK.
0059  */
0060 #define DWC2_NAKS_BEFORE_DELAY      3
0061 
/*
 * Debug aid: detect and report missed start-of-frame interrupts.
 *
 * Compares the current hardware frame number against the value expected
 * from the last observed frame (previous + 1, with frame-number wrap
 * handled by dwc2_frame_num_inc()) and logs any mismatch. When
 * CONFIG_USB_DWC2_TRACK_MISSED_SOFS is enabled, the first
 * FRAME_NUM_ARRAY_SIZE mismatches are recorded and then dumped to the
 * kernel log exactly once.
 */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
    u16 curr_frame_number = hsotg->frame_number;
    u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);

    if (expected != curr_frame_number)
        dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
                  expected, curr_frame_number);

#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
    if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
        /* Still collecting: record each mismatch pair */
        if (expected != curr_frame_number) {
            hsotg->frame_num_array[hsotg->frame_num_idx] =
                    curr_frame_number;
            hsotg->last_frame_num_array[hsotg->frame_num_idx] =
                    hsotg->last_frame_num;
            hsotg->frame_num_idx++;
        }
    } else if (!hsotg->dumped_frame_num_array) {
        /* Array full: dump the collected history once */
        int i;

        dev_info(hsotg->dev, "Frame     Last Frame\n");
        dev_info(hsotg->dev, "-----     ----------\n");
        for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
            dev_info(hsotg->dev, "0x%04x    0x%04x\n",
                 hsotg->frame_num_array[i],
                 hsotg->last_frame_num_array[i]);
        }
        hsotg->dumped_frame_num_array = 1;
    }
#endif
    /* Remember this frame for the next SOF comparison */
    hsotg->last_frame_num = curr_frame_number;
}
0096 
0097 static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
0098                     struct dwc2_host_chan *chan,
0099                     struct dwc2_qtd *qtd)
0100 {
0101     struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
0102     struct urb *usb_urb;
0103 
0104     if (!chan->qh)
0105         return;
0106 
0107     if (chan->qh->dev_speed == USB_SPEED_HIGH)
0108         return;
0109 
0110     if (!qtd->urb)
0111         return;
0112 
0113     usb_urb = qtd->urb->priv;
0114     if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
0115         return;
0116 
0117     /*
0118      * The root hub doesn't really have a TT, but Linux thinks it
0119      * does because how could you have a "high speed hub" that
0120      * directly talks directly to low speed devices without a TT?
0121      * It's all lies.  Lies, I tell you.
0122      */
0123     if (usb_urb->dev->tt->hub == root_hub)
0124         return;
0125 
0126     if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
0127         chan->qh->tt_buffer_dirty = 1;
0128         if (usb_hub_clear_tt_buffer(usb_urb))
0129             /* Clear failed; let's hope things work anyway */
0130             chan->qh->tt_buffer_dirty = 0;
0131     }
0132 }
0133 
0134 /*
0135  * Handles the start-of-frame interrupt in host mode. Non-periodic
0136  * transactions may be queued to the DWC_otg controller for the current
0137  * (micro)frame. Periodic transactions may be queued to the controller
0138  * for the next (micro)frame.
0139  */
0140 static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
0141 {
0142     struct list_head *qh_entry;
0143     struct dwc2_qh *qh;
0144     enum dwc2_transaction_type tr_type;
0145 
0146     /* Clear interrupt */
0147     dwc2_writel(hsotg, GINTSTS_SOF, GINTSTS);
0148 
0149 #ifdef DEBUG_SOF
0150     dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
0151 #endif
0152 
0153     hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
0154 
0155     dwc2_track_missed_sofs(hsotg);
0156 
0157     /* Determine whether any periodic QHs should be executed */
0158     qh_entry = hsotg->periodic_sched_inactive.next;
0159     while (qh_entry != &hsotg->periodic_sched_inactive) {
0160         qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
0161         qh_entry = qh_entry->next;
0162         if (dwc2_frame_num_le(qh->next_active_frame,
0163                       hsotg->frame_number)) {
0164             dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n",
0165                       qh, hsotg->frame_number,
0166                       qh->next_active_frame);
0167 
0168             /*
0169              * Move QH to the ready list to be executed next
0170              * (micro)frame
0171              */
0172             list_move_tail(&qh->qh_list_entry,
0173                        &hsotg->periodic_sched_ready);
0174         }
0175     }
0176     tr_type = dwc2_hcd_select_transactions(hsotg);
0177     if (tr_type != DWC2_TRANSACTION_NONE)
0178         dwc2_hcd_queue_transactions(hsotg, tr_type);
0179 }
0180 
/*
 * Handles the Rx FIFO Level Interrupt, which indicates that there is
 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
 * memory if the DWC_otg controller is operating in Slave mode.
 */
static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
    u32 grxsts, chnum, bcnt, dpid, pktsts;
    struct dwc2_host_chan *chan;

    if (dbg_perio())
        dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

    /*
     * NOTE(review): GRXSTSP appears to be a read-and-pop status register
     * (per its name), so it must be read exactly once per interrupt and
     * the value cached, as done here — confirm against the databook.
     */
    grxsts = dwc2_readl(hsotg, GRXSTSP);
    chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
    chan = hsotg->hc_ptr_array[chnum];
    if (!chan) {
        dev_err(hsotg->dev, "Unable to get corresponding channel\n");
        return;
    }

    /* Decode the remaining status fields from the cached value */
    bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
    dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
    pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

    /* Packet Status */
    if (dbg_perio()) {
        dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
        dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
        dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n", dpid,
             chan->data_pid_start);
        dev_vdbg(hsotg->dev, "    PStatus = %d\n", pktsts);
    }

    switch (pktsts) {
    case GRXSTS_PKTSTS_HCHIN:
        /* Read the data into the host buffer */
        if (bcnt > 0) {
            dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

            /* Update the HC fields for the next packet received */
            chan->xfer_count += bcnt;
            chan->xfer_buf += bcnt;
        }
        break;
    case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
    case GRXSTS_PKTSTS_DATATOGGLEERR:
    case GRXSTS_PKTSTS_HCHHALTED:
        /* Handled in interrupt, just ignore data */
        break;
    default:
        dev_err(hsotg->dev,
            "RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
        break;
    }
}
0237 
/*
 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
 * data packets may be written to the FIFO for OUT transfers. More requests
 * may be written to the non-periodic request queue for IN transfers. This
 * interrupt is enabled only in Slave mode.
 */
static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
    dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
    /* Push more non-periodic work into the now-available FIFO space */
    dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}
0249 
/*
 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
 * packets may be written to the FIFO for OUT transfers. More requests may be
 * written to the periodic request queue for IN transfers. This interrupt is
 * enabled only in Slave mode.
 */
static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
    if (dbg_perio())
        dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
    /* Push more periodic work into the now-available FIFO space */
    dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}
0262 
0263 static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
0264                   u32 *hprt0_modify)
0265 {
0266     struct dwc2_core_params *params = &hsotg->params;
0267     int do_reset = 0;
0268     u32 usbcfg;
0269     u32 prtspd;
0270     u32 hcfg;
0271     u32 fslspclksel;
0272     u32 hfir;
0273 
0274     dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
0275 
0276     /* Every time when port enables calculate HFIR.FrInterval */
0277     hfir = dwc2_readl(hsotg, HFIR);
0278     hfir &= ~HFIR_FRINT_MASK;
0279     hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
0280         HFIR_FRINT_MASK;
0281     dwc2_writel(hsotg, hfir, HFIR);
0282 
0283     /* Check if we need to adjust the PHY clock speed for low power */
0284     if (!params->host_support_fs_ls_low_power) {
0285         /* Port has been enabled, set the reset change flag */
0286         hsotg->flags.b.port_reset_change = 1;
0287         return;
0288     }
0289 
0290     usbcfg = dwc2_readl(hsotg, GUSBCFG);
0291     prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
0292 
0293     if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
0294         /* Low power */
0295         if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
0296             /* Set PHY low power clock select for FS/LS devices */
0297             usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
0298             dwc2_writel(hsotg, usbcfg, GUSBCFG);
0299             do_reset = 1;
0300         }
0301 
0302         hcfg = dwc2_readl(hsotg, HCFG);
0303         fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
0304                   HCFG_FSLSPCLKSEL_SHIFT;
0305 
0306         if (prtspd == HPRT0_SPD_LOW_SPEED &&
0307             params->host_ls_low_power_phy_clk) {
0308             /* 6 MHZ */
0309             dev_vdbg(hsotg->dev,
0310                  "FS_PHY programming HCFG to 6 MHz\n");
0311             if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
0312                 fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
0313                 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
0314                 hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
0315                 dwc2_writel(hsotg, hcfg, HCFG);
0316                 do_reset = 1;
0317             }
0318         } else {
0319             /* 48 MHZ */
0320             dev_vdbg(hsotg->dev,
0321                  "FS_PHY programming HCFG to 48 MHz\n");
0322             if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
0323                 fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
0324                 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
0325                 hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
0326                 dwc2_writel(hsotg, hcfg, HCFG);
0327                 do_reset = 1;
0328             }
0329         }
0330     } else {
0331         /* Not low power */
0332         if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
0333             usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
0334             dwc2_writel(hsotg, usbcfg, GUSBCFG);
0335             do_reset = 1;
0336         }
0337     }
0338 
0339     if (do_reset) {
0340         *hprt0_modify |= HPRT0_RST;
0341         dwc2_writel(hsotg, *hprt0_modify, HPRT0);
0342         queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
0343                    msecs_to_jiffies(60));
0344     } else {
0345         /* Port has been enabled, set the reset change flag */
0346         hsotg->flags.b.port_reset_change = 1;
0347     }
0348 }
0349 
/*
 * There are multiple conditions that can cause a port interrupt. This function
 * determines which interrupt conditions have occurred and handles them
 * appropriately.
 */
static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
    u32 hprt0;
    u32 hprt0_modify;

    dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

    hprt0 = dwc2_readl(hsotg, HPRT0);
    hprt0_modify = hprt0;

    /*
     * Clear appropriate bits in HPRT0 to clear the interrupt bit in
     * GINTSTS
     */
    hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
              HPRT0_OVRCURRCHG);

    /*
     * Port Connect Detected
     * Set flag and clear if detected
     */
    if (hprt0 & HPRT0_CONNDET) {
        /* Ack CONNDET by writing it back set (write-one-to-clear) */
        dwc2_writel(hsotg, hprt0_modify | HPRT0_CONNDET, HPRT0);

        dev_vdbg(hsotg->dev,
             "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
             hprt0);
        dwc2_hcd_connect(hsotg);

        /*
         * The Hub driver asserts a reset when it sees port connect
         * status change flag
         */
    }

    /*
     * Port Enable Changed
     * Clear if detected - Set internal flag if disabled
     */
    if (hprt0 & HPRT0_ENACHG) {
        dwc2_writel(hsotg, hprt0_modify | HPRT0_ENACHG, HPRT0);
        dev_vdbg(hsotg->dev,
             "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
             hprt0, !!(hprt0 & HPRT0_ENA));
        if (hprt0 & HPRT0_ENA) {
            /* Port just came up: (re)program clocks and flags */
            hsotg->new_connection = true;
            dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
        } else {
            hsotg->flags.b.port_enable_change = 1;
            if (hsotg->params.dma_desc_fs_enable) {
                u32 hcfg;

                /*
                 * Port disabled with FS descriptor DMA configured:
                 * fall back to non-descriptor DMA until the next
                 * connection.
                 */
                hsotg->params.dma_desc_enable = false;
                hsotg->new_connection = false;
                hcfg = dwc2_readl(hsotg, HCFG);
                hcfg &= ~HCFG_DESCDMA;
                dwc2_writel(hsotg, hcfg, HCFG);
            }
        }
    }

    /* Overcurrent Change Interrupt */
    if (hprt0 & HPRT0_OVRCURRCHG) {
        dwc2_writel(hsotg, hprt0_modify | HPRT0_OVRCURRCHG,
                HPRT0);
        dev_vdbg(hsotg->dev,
             "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
             hprt0);
        hsotg->flags.b.port_over_current_change = 1;
    }
}
0426 
0427 /*
0428  * Gets the actual length of a transfer after the transfer halts. halt_status
0429  * holds the reason for the halt.
0430  *
0431  * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
0432  * is set to 1 upon return if less than the requested number of bytes were
0433  * transferred. short_read may also be NULL on entry, in which case it remains
0434  * unchanged.
0435  */
0436 static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
0437                        struct dwc2_host_chan *chan, int chnum,
0438                        struct dwc2_qtd *qtd,
0439                        enum dwc2_halt_status halt_status,
0440                        int *short_read)
0441 {
0442     u32 hctsiz, count, length;
0443 
0444     hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
0445 
0446     if (halt_status == DWC2_HC_XFER_COMPLETE) {
0447         if (chan->ep_is_in) {
0448             count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
0449                 TSIZ_XFERSIZE_SHIFT;
0450             length = chan->xfer_len - count;
0451             if (short_read)
0452                 *short_read = (count != 0);
0453         } else if (chan->qh->do_split) {
0454             length = qtd->ssplit_out_xfer_count;
0455         } else {
0456             length = chan->xfer_len;
0457         }
0458     } else {
0459         /*
0460          * Must use the hctsiz.pktcnt field to determine how much data
0461          * has been transferred. This field reflects the number of
0462          * packets that have been transferred via the USB. This is
0463          * always an integral number of packets if the transfer was
0464          * halted before its normal completion. (Can't use the
0465          * hctsiz.xfersize field because that reflects the number of
0466          * bytes transferred via the AHB, not the USB).
0467          */
0468         count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
0469         length = (chan->start_pkt_count - count) * chan->max_packet;
0470     }
0471 
0472     return length;
0473 }
0474 
0475 /**
0476  * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
0477  * Complete interrupt on the host channel. Updates the actual_length field
0478  * of the URB based on the number of bytes transferred via the host channel.
0479  * Sets the URB status if the data transfer is finished.
0480  *
0481  * @hsotg: Programming view of the DWC_otg controller
0482  * @chan: Programming view of host channel
0483  * @chnum: Channel number
0484  * @urb: Processing URB
0485  * @qtd: Queue transfer descriptor
0486  *
0487  * Return: 1 if the data transfer specified by the URB is completely finished,
0488  * 0 otherwise
0489  */
0490 static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
0491                  struct dwc2_host_chan *chan, int chnum,
0492                  struct dwc2_hcd_urb *urb,
0493                  struct dwc2_qtd *qtd)
0494 {
0495     u32 hctsiz;
0496     int xfer_done = 0;
0497     int short_read = 0;
0498     int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
0499                               DWC2_HC_XFER_COMPLETE,
0500                               &short_read);
0501 
0502     if (urb->actual_length + xfer_length > urb->length) {
0503         dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
0504         xfer_length = urb->length - urb->actual_length;
0505     }
0506 
0507     dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
0508          urb->actual_length, xfer_length);
0509     urb->actual_length += xfer_length;
0510 
0511     if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
0512         (urb->flags & URB_SEND_ZERO_PACKET) &&
0513         urb->actual_length >= urb->length &&
0514         !(urb->length % chan->max_packet)) {
0515         xfer_done = 0;
0516     } else if (short_read || urb->actual_length >= urb->length) {
0517         xfer_done = 1;
0518         urb->status = 0;
0519     }
0520 
0521     hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
0522     dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
0523          __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
0524     dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
0525     dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
0526          (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
0527     dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
0528     dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
0529     dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
0530          xfer_done);
0531 
0532     return xfer_done;
0533 }
0534 
0535 /*
0536  * Save the starting data toggle for the next transfer. The data toggle is
0537  * saved in the QH for non-control transfers and it's saved in the QTD for
0538  * control transfers.
0539  */
0540 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
0541                    struct dwc2_host_chan *chan, int chnum,
0542                    struct dwc2_qtd *qtd)
0543 {
0544     u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
0545     u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
0546 
0547     if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
0548         if (WARN(!chan || !chan->qh,
0549              "chan->qh must be specified for non-control eps\n"))
0550             return;
0551 
0552         if (pid == TSIZ_SC_MC_PID_DATA0)
0553             chan->qh->data_toggle = DWC2_HC_PID_DATA0;
0554         else
0555             chan->qh->data_toggle = DWC2_HC_PID_DATA1;
0556     } else {
0557         if (WARN(!qtd,
0558              "qtd must be specified for control eps\n"))
0559             return;
0560 
0561         if (pid == TSIZ_SC_MC_PID_DATA0)
0562             qtd->data_toggle = DWC2_HC_PID_DATA0;
0563         else
0564             qtd->data_toggle = DWC2_HC_PID_DATA1;
0565     }
0566 }
0567 
/**
 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
 * the transfer is stopped for any reason. The fields of the current entry in
 * the frame descriptor array are set based on the transfer state and the input
 * halt_status. Completes the Isochronous URB if all the URB frames have been
 * completed.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @chan: Programming view of host channel
 * @chnum: Channel number
 * @halt_status: Reason for halting a host channel
 * @qtd: Queue transfer descriptor
 *
 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
 */
static enum dwc2_halt_status dwc2_update_isoc_urb_state(
        struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
        int chnum, struct dwc2_qtd *qtd,
        enum dwc2_halt_status halt_status)
{
    struct dwc2_hcd_iso_packet_desc *frame_desc;
    struct dwc2_hcd_urb *urb = qtd->urb;

    if (!urb)
        return DWC2_HC_XFER_NO_HALT_STATUS;

    /* Descriptor for the frame this channel was servicing */
    frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

    switch (halt_status) {
    case DWC2_HC_XFER_COMPLETE:
        frame_desc->status = 0;
        frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
                    chan, chnum, qtd, halt_status, NULL);
        break;
    case DWC2_HC_XFER_FRAME_OVERRUN:
        urb->error_count++;
        /* -ENOSR for IN overruns, -ECOMM for OUT, per USB core usage */
        if (chan->ep_is_in)
            frame_desc->status = -ENOSR;
        else
            frame_desc->status = -ECOMM;
        frame_desc->actual_length = 0;
        break;
    case DWC2_HC_XFER_BABBLE_ERR:
        urb->error_count++;
        frame_desc->status = -EOVERFLOW;
        /* Don't need to update actual_length in this case */
        break;
    case DWC2_HC_XFER_XACT_ERR:
        urb->error_count++;
        frame_desc->status = -EPROTO;
        frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
                    chan, chnum, qtd, halt_status, NULL);

        /* Skip whole frame */
        if (chan->qh->do_split &&
            chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
            hsotg->params.host_dma) {
            qtd->complete_split = 0;
            qtd->isoc_split_offset = 0;
        }

        break;
    default:
        dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
            halt_status);
        break;
    }

    if (++qtd->isoc_frame_index == urb->packet_count) {
        /*
         * urb->status is not used for isoc transfers. The individual
         * frame_desc statuses are used instead.
         */
        dwc2_host_complete(hsotg, qtd, 0);
        halt_status = DWC2_HC_XFER_URB_COMPLETE;
    } else {
        halt_status = DWC2_HC_XFER_COMPLETE;
    }

    return halt_status;
}
0650 
0651 /*
0652  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
0653  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
0654  * still linked to the QH, the QH is added to the end of the inactive
0655  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
0656  * schedule if no more QTDs are linked to the QH.
0657  */
0658 static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
0659                    int free_qtd)
0660 {
0661     int continue_split = 0;
0662     struct dwc2_qtd *qtd;
0663 
0664     if (dbg_qh(qh))
0665         dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
0666              hsotg, qh, free_qtd);
0667 
0668     if (list_empty(&qh->qtd_list)) {
0669         dev_dbg(hsotg->dev, "## QTD list empty ##\n");
0670         goto no_qtd;
0671     }
0672 
0673     qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
0674 
0675     if (qtd->complete_split)
0676         continue_split = 1;
0677     else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
0678          qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
0679         continue_split = 1;
0680 
0681     if (free_qtd) {
0682         dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
0683         continue_split = 0;
0684     }
0685 
0686 no_qtd:
0687     qh->channel = NULL;
0688     dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
0689 }
0690 
/**
 * dwc2_release_channel() - Releases a host channel for use by other transfers
 *
 * @hsotg:       The HCD state structure
 * @chan:        The host channel to release
 * @qtd:         The QTD associated with the host channel. This QTD may be
 *               freed if the transfer is complete or an error has occurred.
 * @halt_status: Reason the channel is being released. This status
 *               determines the actions taken by this function.
 *
 * Also attempts to select and queue more transactions since at least one host
 * channel is available.
 */
static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
                 struct dwc2_host_chan *chan,
                 struct dwc2_qtd *qtd,
                 enum dwc2_halt_status halt_status)
{
    enum dwc2_transaction_type tr_type;
    u32 haintmsk;
    int free_qtd = 0;

    if (dbg_hc(chan))
        dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
             __func__, chan->hc_num, halt_status);

    /* Decide whether the current QTD should be freed along with the QH */
    switch (halt_status) {
    case DWC2_HC_XFER_URB_COMPLETE:
        free_qtd = 1;
        break;
    case DWC2_HC_XFER_AHB_ERR:
    case DWC2_HC_XFER_STALL:
    case DWC2_HC_XFER_BABBLE_ERR:
        /* Fatal per-transfer errors: the QTD cannot be retried */
        free_qtd = 1;
        break;
    case DWC2_HC_XFER_XACT_ERR:
        /* Only give up after repeated transaction errors */
        if (qtd && qtd->error_count >= 3) {
            dev_vdbg(hsotg->dev,
                 "  Complete URB with transaction error\n");
            free_qtd = 1;
            dwc2_host_complete(hsotg, qtd, -EPROTO);
        }
        break;
    case DWC2_HC_XFER_URB_DEQUEUE:
        /*
         * The QTD has already been removed and the QH has been
         * deactivated. Don't want to do anything except release the
         * host channel and try to queue more transfers.
         */
        goto cleanup;
    case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
        dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
        free_qtd = 1;
        dwc2_host_complete(hsotg, qtd, -EIO);
        break;
    case DWC2_HC_XFER_NO_HALT_STATUS:
    default:
        break;
    }

    dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
    /*
     * Release the host channel for use by other transfers. The cleanup
     * function clears the channel interrupt enables and conditions, so
     * there's no need to clear the Channel Halted interrupt separately.
     */
    if (!list_empty(&chan->hc_list_entry))
        list_del(&chan->hc_list_entry);
    dwc2_hc_cleanup(hsotg, chan);
    list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);

    /* Return the channel's scheduling reservation */
    if (hsotg->params.uframe_sched) {
        hsotg->available_host_channels++;
    } else {
        switch (chan->ep_type) {
        case USB_ENDPOINT_XFER_CONTROL:
        case USB_ENDPOINT_XFER_BULK:
            hsotg->non_periodic_channels--;
            break;
        default:
            /*
             * Don't release reservations for periodic channels
             * here. That's done when a periodic transfer is
             * descheduled (i.e. when the QH is removed from the
             * periodic schedule).
             */
            break;
        }
    }

    /* Stop receiving interrupts from the now-free channel */
    haintmsk = dwc2_readl(hsotg, HAINTMSK);
    haintmsk &= ~(1 << chan->hc_num);
    dwc2_writel(hsotg, haintmsk, HAINTMSK);

    /* Try to queue more transfers now that there's a free channel */
    tr_type = dwc2_hcd_select_transactions(hsotg);
    if (tr_type != DWC2_TRANSACTION_NONE)
        dwc2_hcd_queue_transactions(hsotg, tr_type);
}
0792 
/*
 * Halts a host channel. If the channel cannot be halted immediately because
 * the request queue is full, this function ensures that the FIFO empty
 * interrupt for the appropriate queue is enabled so that the halt request can
 * be queued when there is space in the request queue.
 *
 * This function may also be called in DMA mode. In that case, the channel is
 * simply released since the core always halts the channel automatically in
 * DMA mode.
 */
static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
                  struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
                  enum dwc2_halt_status halt_status)
{
    if (dbg_hc(chan))
        dev_vdbg(hsotg->dev, "%s()\n", __func__);

    if (hsotg->params.host_dma) {
        /* In DMA mode no explicit halt is needed; just release */
        if (dbg_hc(chan))
            dev_vdbg(hsotg->dev, "DMA enabled\n");
        dwc2_release_channel(hsotg, chan, qtd, halt_status);
        return;
    }

    /* Slave mode processing */
    dwc2_hc_halt(hsotg, chan, halt_status);

    if (chan->halt_on_queue) {
        u32 gintmsk;

        dev_vdbg(hsotg->dev, "Halt on queue\n");
        if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
            chan->ep_type == USB_ENDPOINT_XFER_BULK) {
            dev_vdbg(hsotg->dev, "control/bulk\n");
            /*
             * Make sure the Non-periodic Tx FIFO empty interrupt
             * is enabled so that the non-periodic schedule will
             * be processed
             */
            gintmsk = dwc2_readl(hsotg, GINTMSK);
            gintmsk |= GINTSTS_NPTXFEMP;
            dwc2_writel(hsotg, gintmsk, GINTMSK);
        } else {
            dev_vdbg(hsotg->dev, "isoc/intr\n");
            /*
             * Move the QH from the periodic queued schedule to
             * the periodic assigned schedule. This allows the
             * halt to be queued when the periodic schedule is
             * processed.
             */
            list_move_tail(&chan->qh->qh_list_entry,
                       &hsotg->periodic_sched_assigned);

            /*
             * Make sure the Periodic Tx FIFO Empty interrupt is
             * enabled so that the periodic schedule will be
             * processed
             */
            gintmsk = dwc2_readl(hsotg, GINTMSK);
            gintmsk |= GINTSTS_PTXFEMP;
            dwc2_writel(hsotg, gintmsk, GINTMSK);
        }
    }
}
0857 
0858 /*
0859  * Performs common cleanup for non-periodic transfers after a Transfer
0860  * Complete interrupt. This function should be called after any endpoint type
0861  * specific handling is finished to release the host channel.
0862  */
0863 static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
0864                         struct dwc2_host_chan *chan,
0865                         int chnum, struct dwc2_qtd *qtd,
0866                         enum dwc2_halt_status halt_status)
0867 {
0868     dev_vdbg(hsotg->dev, "%s()\n", __func__);
0869 
0870     qtd->error_count = 0;
0871 
0872     if (chan->hcint & HCINTMSK_NYET) {
0873         /*
0874          * Got a NYET on the last transaction of the transfer. This
0875          * means that the endpoint should be in the PING state at the
0876          * beginning of the next transfer.
0877          */
0878         dev_vdbg(hsotg->dev, "got NYET\n");
0879         chan->qh->ping_state = 1;
0880     }
0881 
0882     /*
0883      * Always halt and release the host channel to make it available for
0884      * more transfers. There may still be more phases for a control
0885      * transfer or more data packets for a bulk transfer at this point,
0886      * but the host channel is still halted. A channel will be reassigned
0887      * to the transfer when the non-periodic schedule is processed after
0888      * the channel is released. This allows transactions to be queued
0889      * properly via dwc2_hcd_queue_transactions, which also enables the
0890      * Tx FIFO Empty interrupt if necessary.
0891      */
0892     if (chan->ep_is_in) {
0893         /*
0894          * IN transfers in Slave mode require an explicit disable to
0895          * halt the channel. (In DMA mode, this call simply releases
0896          * the channel.)
0897          */
0898         dwc2_halt_channel(hsotg, chan, qtd, halt_status);
0899     } else {
0900         /*
0901          * The channel is automatically disabled by the core for OUT
0902          * transfers in Slave mode
0903          */
0904         dwc2_release_channel(hsotg, chan, qtd, halt_status);
0905     }
0906 }
0907 
0908 /*
0909  * Performs common cleanup for periodic transfers after a Transfer Complete
0910  * interrupt. This function should be called after any endpoint type specific
0911  * handling is finished to release the host channel.
0912  */
0913 static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
0914                     struct dwc2_host_chan *chan, int chnum,
0915                     struct dwc2_qtd *qtd,
0916                     enum dwc2_halt_status halt_status)
0917 {
0918     u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
0919 
0920     qtd->error_count = 0;
0921 
0922     if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
0923         /* Core halts channel in these cases */
0924         dwc2_release_channel(hsotg, chan, qtd, halt_status);
0925     else
0926         /* Flush any outstanding requests from the Tx queue */
0927         dwc2_halt_channel(hsotg, chan, qtd, halt_status);
0928 }
0929 
/*
 * Handles Transfer Complete for the IN half of an isochronous split
 * transaction (CSPLIT IN) in DMA mode. Accumulates the received data into
 * the current isoc frame descriptor and advances the split/frame state.
 *
 * Returns 1 if the host channel was released here, 0 if the caller should
 * continue normal transfer-complete processing.
 */
static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
                       struct dwc2_host_chan *chan, int chnum,
                       struct dwc2_qtd *qtd)
{
    struct dwc2_hcd_iso_packet_desc *frame_desc;
    u32 len;
    u32 hctsiz;
    u32 pid;

    /* Nothing to account without an URB */
    if (!qtd->urb)
        return 0;

    frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
    len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
                      DWC2_HC_XFER_COMPLETE, NULL);
    /* No data at all for this frame yet: retry the complete split */
    if (!len && !qtd->isoc_split_offset) {
        qtd->complete_split = 0;
        return 0;
    }

    frame_desc->actual_length += len;

    if (chan->align_buf) {
        /* Transfer went through the DMA-aligned bounce buffer; copy
         * the received bytes back into the caller's buffer */
        dev_vdbg(hsotg->dev, "non-aligned buffer\n");
        dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
                 DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
        memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
               chan->qh->dw_align_buf, len);
    }

    qtd->isoc_split_offset += len;

    hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
    pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;

    /*
     * The frame is done when all expected bytes have arrived or the PID
     * field reads 0 (DATA0, i.e. the final data chunk of the split)
     */
    if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
        frame_desc->status = 0;
        qtd->isoc_frame_index++;
        qtd->complete_split = 0;
        qtd->isoc_split_offset = 0;
    }

    /* Release the channel; complete the URB if all frames are done */
    if (qtd->isoc_frame_index == qtd->urb->packet_count) {
        dwc2_host_complete(hsotg, qtd, 0);
        dwc2_release_channel(hsotg, chan, qtd,
                     DWC2_HC_XFER_URB_COMPLETE);
    } else {
        dwc2_release_channel(hsotg, chan, qtd,
                     DWC2_HC_XFER_NO_HALT_STATUS);
    }

    return 1;   /* Indicates that channel released */
}
0983 
0984 /*
0985  * Handles a host channel Transfer Complete interrupt. This handler may be
0986  * called in either DMA mode or Slave mode.
0987  */
static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
                  struct dwc2_host_chan *chan, int chnum,
                  struct dwc2_qtd *qtd)
{
    struct dwc2_hcd_urb *urb = qtd->urb;
    enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
    int pipe_type;
    int urb_xfer_done;

    if (dbg_hc(chan))
        dev_vdbg(hsotg->dev,
             "--Host Channel %d Interrupt: Transfer Complete--\n",
             chnum);

    /* Without an URB there is no state to update; just ack the irq */
    if (!urb)
        goto handle_xfercomp_done;

    pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

    /* Descriptor DMA mode has its own completion path */
    if (hsotg->params.dma_desc_enable) {
        dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
        if (pipe_type == USB_ENDPOINT_XFER_ISOC)
            /* Do not disable the interrupt, just clear it */
            return;
        goto handle_xfercomp_done;
    }

    /* Handle xfer complete on CSPLIT */
    if (chan->qh->do_split) {
        if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
            hsotg->params.host_dma) {
            /* Isoc IN splits in DMA mode are handled separately;
             * a non-zero return means the channel was released */
            if (qtd->complete_split &&
                dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
                            qtd))
                goto handle_xfercomp_done;
        } else {
            qtd->complete_split = 0;
        }
    }

    /* Update the QTD and URB states */
    switch (pipe_type) {
    case USB_ENDPOINT_XFER_CONTROL:
        /* Advance the control transfer phase state machine:
         * SETUP -> DATA (if any) -> STATUS -> URB complete */
        switch (qtd->control_phase) {
        case DWC2_CONTROL_SETUP:
            if (urb->length > 0)
                qtd->control_phase = DWC2_CONTROL_DATA;
            else
                qtd->control_phase = DWC2_CONTROL_STATUS;
            dev_vdbg(hsotg->dev,
                 "  Control setup transaction done\n");
            halt_status = DWC2_HC_XFER_COMPLETE;
            break;
        case DWC2_CONTROL_DATA:
            urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
                                  chnum, urb, qtd);
            if (urb_xfer_done) {
                qtd->control_phase = DWC2_CONTROL_STATUS;
                dev_vdbg(hsotg->dev,
                     "  Control data transfer done\n");
            } else {
                /* More data phases to come; remember the
                 * data toggle for the next one */
                dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
                              qtd);
            }
            halt_status = DWC2_HC_XFER_COMPLETE;
            break;
        case DWC2_CONTROL_STATUS:
            dev_vdbg(hsotg->dev, "  Control transfer complete\n");
            if (urb->status == -EINPROGRESS)
                urb->status = 0;
            dwc2_host_complete(hsotg, qtd, urb->status);
            halt_status = DWC2_HC_XFER_URB_COMPLETE;
            break;
        }

        dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
                        halt_status);
        break;
    case USB_ENDPOINT_XFER_BULK:
        dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
        urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
                              qtd);
        if (urb_xfer_done) {
            dwc2_host_complete(hsotg, qtd, urb->status);
            halt_status = DWC2_HC_XFER_URB_COMPLETE;
        } else {
            halt_status = DWC2_HC_XFER_COMPLETE;
        }

        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
        dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
                        halt_status);
        break;
    case USB_ENDPOINT_XFER_INT:
        dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
        urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
                              qtd);

        /*
         * Interrupt URB is done on the first transfer complete
         * interrupt
         */
        if (urb_xfer_done) {
            dwc2_host_complete(hsotg, qtd, urb->status);
            halt_status = DWC2_HC_XFER_URB_COMPLETE;
        } else {
            halt_status = DWC2_HC_XFER_COMPLETE;
        }

        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
        dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
                        halt_status);
        break;
    case USB_ENDPOINT_XFER_ISOC:
        if (dbg_perio())
            dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
        /* Only update state when a whole (non-split) transaction
         * finished; split positions are handled in the ACK path */
        if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
            halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
                            chnum, qtd,
                            DWC2_HC_XFER_COMPLETE);
        dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
                        halt_status);
        break;
    }

handle_xfercomp_done:
    disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
}
1116 
1117 /*
1118  * Handles a host channel STALL interrupt. This handler may be called in
1119  * either DMA mode or Slave mode.
1120  */
1121 static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1122                    struct dwc2_host_chan *chan, int chnum,
1123                    struct dwc2_qtd *qtd)
1124 {
1125     struct dwc2_hcd_urb *urb = qtd->urb;
1126     int pipe_type;
1127 
1128     dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1129         chnum);
1130 
1131     if (hsotg->params.dma_desc_enable) {
1132         dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1133                         DWC2_HC_XFER_STALL);
1134         goto handle_stall_done;
1135     }
1136 
1137     if (!urb)
1138         goto handle_stall_halt;
1139 
1140     pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1141 
1142     if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1143         dwc2_host_complete(hsotg, qtd, -EPIPE);
1144 
1145     if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1146         pipe_type == USB_ENDPOINT_XFER_INT) {
1147         dwc2_host_complete(hsotg, qtd, -EPIPE);
1148         /*
1149          * USB protocol requires resetting the data toggle for bulk
1150          * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1151          * setup command is issued to the endpoint. Anticipate the
1152          * CLEAR_FEATURE command since a STALL has occurred and reset
1153          * the data toggle now.
1154          */
1155         chan->qh->data_toggle = 0;
1156     }
1157 
1158 handle_stall_halt:
1159     dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1160 
1161 handle_stall_done:
1162     disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1163 }
1164 
1165 /*
1166  * Updates the state of the URB when a transfer has been stopped due to an
1167  * abnormal condition before the transfer completes. Modifies the
1168  * actual_length field of the URB to reflect the number of bytes that have
1169  * actually been transferred via the host channel.
1170  */
1171 static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1172                       struct dwc2_host_chan *chan, int chnum,
1173                       struct dwc2_hcd_urb *urb,
1174                       struct dwc2_qtd *qtd,
1175                       enum dwc2_halt_status halt_status)
1176 {
1177     u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1178                               qtd, halt_status, NULL);
1179     u32 hctsiz;
1180 
1181     if (urb->actual_length + xfer_length > urb->length) {
1182         dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1183         xfer_length = urb->length - urb->actual_length;
1184     }
1185 
1186     urb->actual_length += xfer_length;
1187 
1188     hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1189     dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1190          __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1191     dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
1192          chan->start_pkt_count);
1193     dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
1194          (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1195     dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
1196     dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
1197          xfer_length);
1198     dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
1199          urb->actual_length);
1200     dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
1201          urb->length);
1202 }
1203 
1204 /*
1205  * Handles a host channel NAK interrupt. This handler may be called in either
1206  * DMA mode or Slave mode.
1207  */
static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
                 struct dwc2_host_chan *chan, int chnum,
                 struct dwc2_qtd *qtd)
{
    /* Nothing to do without a QTD or URB; just bail out */
    if (!qtd) {
        dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
        return;
    }

    if (!qtd->urb) {
        dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
        return;
    }

    if (dbg_hc(chan))
        dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
             chnum);

    /*
     * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
     * interrupt. Re-start the SSPLIT transfer.
     *
     * Normally for non-periodic transfers we'll retry right away, but to
     * avoid interrupt storms we'll wait before retrying if we've got
     * several NAKs. If we didn't do this we'd retry directly from the
     * interrupt handler and could end up quickly getting another
     * interrupt (another NAK), which we'd retry. Note that we do not
     * delay retries for IN parts of control requests, as those are expected
     * to complete fairly quickly, and if we delay them we risk confusing
     * the device and cause it issue STALL.
     *
     * Note that in DMA mode software only gets involved to re-send NAKed
     * transfers for split transactions, so we only need to apply this
     * delaying logic when handling splits. In non-DMA mode presumably we
     * might want a similar delay if someone can demonstrate this problem
     * affects that code path too.
     */
    if (chan->do_split) {
        /* A NAK on the complete split means the whole split attempt
         * must be restarted from the start split */
        if (chan->complete_split)
            qtd->error_count = 0;
        qtd->complete_split = 0;
        qtd->num_naks++;
        /* Ask the scheduler to wait before retrying once enough NAKs
         * have accumulated (except for control IN, see above) */
        qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
                !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
                  chan->ep_is_in);
        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
        goto handle_nak_done;
    }

    switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
    case USB_ENDPOINT_XFER_CONTROL:
    case USB_ENDPOINT_XFER_BULK:
        if (hsotg->params.host_dma && chan->ep_is_in) {
            /*
             * NAK interrupts are enabled on bulk/control IN
             * transfers in DMA mode for the sole purpose of
             * resetting the error count after a transaction error
             * occurs. The core will continue transferring data.
             */
            qtd->error_count = 0;
            break;
        }

        /*
         * NAK interrupts normally occur during OUT transfers in DMA
         * or Slave mode. For IN transfers, more requests will be
         * queued as request queue space is available.
         */
        qtd->error_count = 0;

        if (!chan->qh->ping_state) {
            /* Record progress and the data toggle so the transfer
             * can resume from the right point */
            dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
                          qtd, DWC2_HC_XFER_NAK);
            dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

            if (chan->speed == USB_SPEED_HIGH)
                chan->qh->ping_state = 1;
        }

        /*
         * Halt the channel so the transfer can be re-started from
         * the appropriate point or the PING protocol will
         * start/continue
         */
        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
        break;
    case USB_ENDPOINT_XFER_INT:
        qtd->error_count = 0;
        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
        break;
    case USB_ENDPOINT_XFER_ISOC:
        /* Should never get called for isochronous transfers */
        dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
        break;
    }

handle_nak_done:
    disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
}
1307 
1308 /*
1309  * Handles a host channel ACK interrupt. This interrupt is enabled when
1310  * performing the PING protocol in Slave mode, when errors occur during
1311  * either Slave mode or DMA mode, and during Start Split transactions.
1312  */
static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
                 struct dwc2_host_chan *chan, int chnum,
                 struct dwc2_qtd *qtd)
{
    struct dwc2_hcd_iso_packet_desc *frame_desc;

    if (dbg_hc(chan))
        dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
             chnum);

    if (chan->do_split) {
        /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
        if (!chan->ep_is_in &&
            chan->data_pid_start != DWC2_HC_PID_SETUP)
            qtd->ssplit_out_xfer_count = chan->xfer_len;

        if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
            /* SSPLIT acknowledged: proceed to the complete split */
            qtd->complete_split = 1;
            dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
        } else {
            /* ISOC OUT */
            switch (chan->xact_pos) {
            case DWC2_HCSPLT_XACTPOS_ALL:
                break;
            case DWC2_HCSPLT_XACTPOS_END:
                /* Last piece sent; reset for the next frame */
                qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
                qtd->isoc_split_offset = 0;
                break;
            case DWC2_HCSPLT_XACTPOS_BEGIN:
            case DWC2_HCSPLT_XACTPOS_MID:
                /*
                 * For BEGIN or MID, calculate the length for
                 * the next microframe to determine the correct
                 * SSPLIT token, either MID or END
                 */
                frame_desc = &qtd->urb->iso_descs[
                        qtd->isoc_frame_index];
                /* 188 bytes is the maximum split payload per
                 * microframe */
                qtd->isoc_split_offset += 188;

                if (frame_desc->length - qtd->isoc_split_offset
                            <= 188)
                    qtd->isoc_split_pos =
                            DWC2_HCSPLT_XACTPOS_END;
                else
                    qtd->isoc_split_pos =
                            DWC2_HCSPLT_XACTPOS_MID;
                break;
            }
        }
    } else {
        qtd->error_count = 0;

        if (chan->qh->ping_state) {
            chan->qh->ping_state = 0;
            /*
             * Halt the channel so the transfer can be re-started
             * from the appropriate point. This only happens in
             * Slave mode. In DMA mode, the ping_state is cleared
             * when the transfer is started because the core
             * automatically executes the PING, then the transfer.
             */
            dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
        }
    }

    /*
     * If the ACK occurred when _not_ in the PING state, let the channel
     * continue transferring data after clearing the error count
     */
    disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
}
1384 
1385 /*
1386  * Handles a host channel NYET interrupt. This interrupt should only occur on
1387  * Bulk and Control OUT endpoints and for complete split transactions. If a
1388  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1389  * handled in the xfercomp interrupt handler, not here. This handler may be
1390  * called in either DMA mode or Slave mode.
1391  */
static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
                  struct dwc2_host_chan *chan, int chnum,
                  struct dwc2_qtd *qtd)
{
    if (dbg_hc(chan))
        dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
             chnum);

    /*
     * NYET on CSPLIT
     * re-do the CSPLIT immediately on non-periodic
     */
    if (chan->do_split && chan->complete_split) {
        /* Isoc IN split in DMA mode: give up on this frame and move
         * on; complete the URB once all frames are consumed */
        if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
            hsotg->params.host_dma) {
            qtd->complete_split = 0;
            qtd->isoc_split_offset = 0;
            qtd->isoc_frame_index++;
            if (qtd->urb &&
                qtd->isoc_frame_index == qtd->urb->packet_count) {
                dwc2_host_complete(hsotg, qtd, 0);
                dwc2_release_channel(hsotg, chan, qtd,
                             DWC2_HC_XFER_URB_COMPLETE);
            } else {
                dwc2_release_channel(hsotg, chan, qtd,
                        DWC2_HC_XFER_NO_HALT_STATUS);
            }
            goto handle_nyet_done;
        }

        /* For periodic endpoints, give up retrying the CSPLIT once
         * the scheduled window for this transfer has passed */
        if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
            chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
            struct dwc2_qh *qh = chan->qh;
            bool past_end;

            if (!hsotg->params.uframe_sched) {
                int frnum = dwc2_hcd_get_frame_number(hsotg);

                /* Don't have num_hs_transfers; simple logic */
                past_end = dwc2_full_frame_num(frnum) !=
                     dwc2_full_frame_num(qh->next_active_frame);
            } else {
                int end_frnum;

                /*
                 * Figure out the end frame based on
                 * schedule.
                 *
                 * We don't want to go on trying again
                 * and again forever. Let's stop when
                 * we've done all the transfers that
                 * were scheduled.
                 *
                 * We're going to be comparing
                 * start_active_frame and
                 * next_active_frame, both of which
                 * are 1 before the time the packet
                 * goes on the wire, so that cancels
                 * out. Basically if had 1 transfer
                 * and we saw 1 NYET then we're done.
                 * We're getting a NYET here so if
                 * next >= (start + num_transfers)
                 * we're done. The complexity is that
                 * for all but ISOC_OUT we skip one
                 * slot.
                 */
                end_frnum = dwc2_frame_num_inc(
                    qh->start_active_frame,
                    qh->num_hs_transfers);

                if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
                    qh->ep_is_in)
                    end_frnum =
                           dwc2_frame_num_inc(end_frnum, 1);

                past_end = dwc2_frame_num_le(
                    end_frnum, qh->next_active_frame);
            }

            if (past_end) {
                /* Treat this as a transaction error. */
#if 0
                /*
                 * Todo: Fix system performance so this can
                 * be treated as an error. Right now complete
                 * splits cannot be scheduled precisely enough
                 * due to other system activity, so this error
                 * occurs regularly in Slave mode.
                 */
                qtd->error_count++;
#endif
                qtd->complete_split = 0;
                dwc2_halt_channel(hsotg, chan, qtd,
                          DWC2_HC_XFER_XACT_ERR);
                /* Todo: add support for isoc release */
                goto handle_nyet_done;
            }
        }

        /* Still within the schedule window: retry the CSPLIT */
        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
        goto handle_nyet_done;
    }

    /* NYET outside a split: the endpoint is not ready; switch to the
     * PING protocol for the retry */
    chan->qh->ping_state = 1;
    qtd->error_count = 0;

    dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
                  DWC2_HC_XFER_NYET);
    dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

    /*
     * Halt the channel and re-start the transfer so the PING protocol
     * will start
     */
    dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);

handle_nyet_done:
    disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
}
1511 
1512 /*
1513  * Handles a host channel babble interrupt. This handler may be called in
1514  * either DMA mode or Slave mode.
1515  */
1516 static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1517                 struct dwc2_host_chan *chan, int chnum,
1518                 struct dwc2_qtd *qtd)
1519 {
1520     dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1521         chnum);
1522 
1523     dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1524 
1525     if (hsotg->params.dma_desc_enable) {
1526         dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1527                         DWC2_HC_XFER_BABBLE_ERR);
1528         goto disable_int;
1529     }
1530 
1531     if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1532         dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1533         dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1534     } else {
1535         enum dwc2_halt_status halt_status;
1536 
1537         halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1538                         qtd, DWC2_HC_XFER_BABBLE_ERR);
1539         dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1540     }
1541 
1542 disable_int:
1543     disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1544 }
1545 
1546 /*
1547  * Handles a host channel AHB error interrupt. This handler is only called in
1548  * DMA mode.
1549  */
1550 static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1551                 struct dwc2_host_chan *chan, int chnum,
1552                 struct dwc2_qtd *qtd)
1553 {
1554     struct dwc2_hcd_urb *urb = qtd->urb;
1555     char *pipetype, *speed;
1556     u32 hcchar;
1557     u32 hcsplt;
1558     u32 hctsiz;
1559     u32 hc_dma;
1560 
1561     dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1562         chnum);
1563 
1564     if (!urb)
1565         goto handle_ahberr_halt;
1566 
1567     dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1568 
1569     hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
1570     hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
1571     hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1572     hc_dma = dwc2_readl(hsotg, HCDMA(chnum));
1573 
1574     dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1575     dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1576     dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1577     dev_err(hsotg->dev, "  Device address: %d\n",
1578         dwc2_hcd_get_dev_addr(&urb->pipe_info));
1579     dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
1580         dwc2_hcd_get_ep_num(&urb->pipe_info),
1581         dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1582 
1583     switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1584     case USB_ENDPOINT_XFER_CONTROL:
1585         pipetype = "CONTROL";
1586         break;
1587     case USB_ENDPOINT_XFER_BULK:
1588         pipetype = "BULK";
1589         break;
1590     case USB_ENDPOINT_XFER_INT:
1591         pipetype = "INTERRUPT";
1592         break;
1593     case USB_ENDPOINT_XFER_ISOC:
1594         pipetype = "ISOCHRONOUS";
1595         break;
1596     default:
1597         pipetype = "UNKNOWN";
1598         break;
1599     }
1600 
1601     dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);
1602 
1603     switch (chan->speed) {
1604     case USB_SPEED_HIGH:
1605         speed = "HIGH";
1606         break;
1607     case USB_SPEED_FULL:
1608         speed = "FULL";
1609         break;
1610     case USB_SPEED_LOW:
1611         speed = "LOW";
1612         break;
1613     default:
1614         speed = "UNKNOWN";
1615         break;
1616     }
1617 
1618     dev_err(hsotg->dev, "  Speed: %s\n", speed);
1619 
1620     dev_err(hsotg->dev, "  Max packet size: %d (mult %d)\n",
1621         dwc2_hcd_get_maxp(&urb->pipe_info),
1622         dwc2_hcd_get_maxp_mult(&urb->pipe_info));
1623     dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
1624     dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
1625         urb->buf, (unsigned long)urb->dma);
1626     dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
1627         urb->setup_packet, (unsigned long)urb->setup_dma);
1628     dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
1629 
1630     /* Core halts the channel for Descriptor DMA mode */
1631     if (hsotg->params.dma_desc_enable) {
1632         dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1633                         DWC2_HC_XFER_AHB_ERR);
1634         goto handle_ahberr_done;
1635     }
1636 
1637     dwc2_host_complete(hsotg, qtd, -EIO);
1638 
1639 handle_ahberr_halt:
1640     /*
1641      * Force a channel halt. Don't call dwc2_halt_channel because that won't
1642      * write to the HCCHARn register in DMA mode to force the halt.
1643      */
1644     dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1645 
1646 handle_ahberr_done:
1647     disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1648 }
1649 
1650 /*
1651  * Handles a host channel transaction error interrupt. This handler may be
1652  * called in either DMA mode or Slave mode.
1653  */
1654 static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1655                  struct dwc2_host_chan *chan, int chnum,
1656                  struct dwc2_qtd *qtd)
1657 {
1658     dev_dbg(hsotg->dev,
1659         "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1660 
1661     dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1662 
1663     if (hsotg->params.dma_desc_enable) {
1664         dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1665                         DWC2_HC_XFER_XACT_ERR);
1666         goto handle_xacterr_done;
1667     }
1668 
1669     switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1670     case USB_ENDPOINT_XFER_CONTROL:
1671     case USB_ENDPOINT_XFER_BULK:
1672         qtd->error_count++;
1673         if (!chan->qh->ping_state) {
1674             dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1675                           qtd, DWC2_HC_XFER_XACT_ERR);
1676             dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1677             if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1678                 chan->qh->ping_state = 1;
1679         }
1680 
1681         /*
1682          * Halt the channel so the transfer can be re-started from
1683          * the appropriate point or the PING protocol will start
1684          */
1685         dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1686         break;
1687     case USB_ENDPOINT_XFER_INT:
1688         qtd->error_count++;
1689         if (chan->do_split && chan->complete_split)
1690             qtd->complete_split = 0;
1691         dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1692         break;
1693     case USB_ENDPOINT_XFER_ISOC:
1694         {
1695             enum dwc2_halt_status halt_status;
1696 
1697             halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1698                      chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1699             dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1700         }
1701         break;
1702     }
1703 
1704 handle_xacterr_done:
1705     disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1706 }
1707 
1708 /*
1709  * Handles a host channel frame overrun interrupt. This handler may be called
1710  * in either DMA mode or Slave mode.
1711  */
1712 static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1713                   struct dwc2_host_chan *chan, int chnum,
1714                   struct dwc2_qtd *qtd)
1715 {
1716     enum dwc2_halt_status halt_status;
1717 
1718     if (dbg_hc(chan))
1719         dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1720             chnum);
1721 
1722     dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1723 
1724     switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1725     case USB_ENDPOINT_XFER_CONTROL:
1726     case USB_ENDPOINT_XFER_BULK:
1727         break;
1728     case USB_ENDPOINT_XFER_INT:
1729         dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1730         break;
1731     case USB_ENDPOINT_XFER_ISOC:
1732         halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1733                     qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1734         dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1735         break;
1736     }
1737 
1738     disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1739 }
1740 
1741 /*
1742  * Handles a host channel data toggle error interrupt. This handler may be
1743  * called in either DMA mode or Slave mode.
1744  */
1745 static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1746                     struct dwc2_host_chan *chan, int chnum,
1747                     struct dwc2_qtd *qtd)
1748 {
1749     dev_dbg(hsotg->dev,
1750         "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1751 
1752     if (chan->ep_is_in)
1753         qtd->error_count = 0;
1754     else
1755         dev_err(hsotg->dev,
1756             "Data Toggle Error on OUT transfer, channel %d\n",
1757             chnum);
1758 
1759     dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1760     disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1761 }
1762 
1763 /*
1764  * For debug only. It checks that a valid halt status is set and that
1765  * HCCHARn.chdis is clear. If there's a problem, corrective action is
1766  * taken and a warning is issued.
1767  *
1768  * Return: true if halt status is ok, false otherwise
1769  */
static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
                struct dwc2_host_chan *chan, int chnum,
                struct dwc2_qtd *qtd)
{
#ifdef DEBUG
    u32 hcchar;
    u32 hctsiz;
    u32 hcintmsk;
    u32 hcsplt;

    if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
        /*
         * This code is here only as a check. This condition should
         * never happen. Ignore the halt if it does occur.
         */
        /* Dump the channel registers to help diagnose how we got here */
        hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
        hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
        hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
        hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
        dev_dbg(hsotg->dev,
            "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
             __func__);
        dev_dbg(hsotg->dev,
            "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
            chnum, hcchar, hctsiz);
        dev_dbg(hsotg->dev,
            "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
            chan->hcint, hcintmsk, hcsplt);
        /* qtd may be NULL here (e.g. channel released after dequeue) */
        if (qtd)
            dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
                qtd->complete_split);
        dev_warn(hsotg->dev,
             "%s: no halt status, channel %d, ignoring interrupt\n",
             __func__, chnum);
        return false;
    }

    /*
     * This code is here only as a check. hcchar.chdis should never be set
     * when the halt interrupt occurs. Halt the channel again if it does
     * occur.
     */
    hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
    if (hcchar & HCCHAR_CHDIS) {
        dev_warn(hsotg->dev,
             "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
             __func__, hcchar);
        /* Clear the pending flag so dwc2_halt_channel really re-halts */
        chan->halt_pending = 0;
        dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
        return false;
    }
#endif

    /* Non-DEBUG builds perform no checking and always report OK */
    return true;
}
1825 
1826 /*
1827  * Handles a host Channel Halted interrupt in DMA mode. This handler
1828  * determines the reason the channel halted and proceeds accordingly.
1829  */
static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
                    struct dwc2_host_chan *chan, int chnum,
                    struct dwc2_qtd *qtd)
{
    u32 hcintmsk;
    int out_nak_enh = 0;

    if (dbg_hc(chan))
        dev_vdbg(hsotg->dev,
             "--Host Channel %d Interrupt: DMA Channel Halted--\n",
             chnum);

    /*
     * For core with OUT NAK enhancement, the flow for high-speed
     * CONTROL/BULK OUT is handled a little differently
     */
    if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
        if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
            (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
             chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
            out_nak_enh = 1;
        }
    }

    /*
     * Dequeue halts (and AHB-error halts in non-descriptor-DMA mode)
     * need no further decoding of the HCINT status bits below.
     */
    if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
        (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
         !hsotg->params.dma_desc_enable)) {
        if (hsotg->params.dma_desc_enable)
            dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
                            chan->halt_status);
        else
            /*
             * Just release the channel. A dequeue can happen on a
             * transfer timeout. In the case of an AHB Error, the
             * channel was forced to halt because there's no way to
             * gracefully recover.
             */
            dwc2_release_channel(hsotg, chan, qtd,
                         chan->halt_status);
        return;
    }

    hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));

    /*
     * Decode the reason for the halt from the HCINT bits latched in
     * chan->hcint. The order of the tests below is significant.
     */
    if (chan->hcint & HCINTMSK_XFERCOMPL) {
        /*
         * Todo: This is here because of a possible hardware bug. Spec
         * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
         * interrupt w/ACK bit set should occur, but I only see the
         * XFERCOMP bit, even with it masked out. This is a workaround
         * for that behavior. Should fix this when hardware is fixed.
         */
        if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
            dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
        dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
    } else if (chan->hcint & HCINTMSK_STALL) {
        dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
    } else if ((chan->hcint & HCINTMSK_XACTERR) &&
           !hsotg->params.dma_desc_enable) {
        if (out_nak_enh) {
            if (chan->hcint &
                (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
                dev_vdbg(hsotg->dev,
                     "XactErr with NYET/NAK/ACK\n");
                qtd->error_count = 0;
            } else {
                dev_vdbg(hsotg->dev,
                     "XactErr without NYET/NAK/ACK\n");
            }
        }

        /*
         * Must handle xacterr before nak or ack. Could get a xacterr
         * at the same time as either of these on a BULK/CONTROL OUT
         * that started with a PING. The xacterr takes precedence.
         */
        dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
    } else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
           hsotg->params.dma_desc_enable) {
        dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
    } else if ((chan->hcint & HCINTMSK_AHBERR) &&
           hsotg->params.dma_desc_enable) {
        dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
    } else if (chan->hcint & HCINTMSK_BBLERR) {
        dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
    } else if (chan->hcint & HCINTMSK_FRMOVRUN) {
        dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
    } else if (!out_nak_enh) {
        if (chan->hcint & HCINTMSK_NYET) {
            /*
             * Must handle nyet before nak or ack. Could get a nyet
             * at the same time as either of those on a BULK/CONTROL
             * OUT that started with a PING. The nyet takes
             * precedence.
             */
            dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
        } else if ((chan->hcint & HCINTMSK_NAK) &&
               !(hcintmsk & HCINTMSK_NAK)) {
            /*
             * If nak is not masked, it's because a non-split IN
             * transfer is in an error state. In that case, the nak
             * is handled by the nak interrupt handler, not here.
             * Handle nak here for BULK/CONTROL OUT transfers, which
             * halt on a NAK to allow rewinding the buffer pointer.
             */
            dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
        } else if ((chan->hcint & HCINTMSK_ACK) &&
               !(hcintmsk & HCINTMSK_ACK)) {
            /*
             * If ack is not masked, it's because a non-split IN
             * transfer is in an error state. In that case, the ack
             * is handled by the ack interrupt handler, not here.
             * Handle ack here for split transfers. Start splits
             * halt on ACK.
             */
            dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
        } else {
            if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
                chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
                /*
                 * A periodic transfer halted with no other
                 * channel interrupts set. Assume it was halted
                 * by the core because it could not be completed
                 * in its scheduled (micro)frame.
                 */
                dev_dbg(hsotg->dev,
                    "%s: Halt channel %d (assume incomplete periodic transfer)\n",
                    __func__, chnum);
                dwc2_halt_channel(hsotg, chan, qtd,
                    DWC2_HC_XFER_PERIODIC_INCOMPLETE);
            } else {
                dev_err(hsotg->dev,
                    "%s: Channel %d - ChHltd set, but reason is unknown\n",
                    __func__, chnum);
                dev_err(hsotg->dev,
                    "hcint 0x%08x, intsts 0x%08x\n",
                    chan->hcint,
                    dwc2_readl(hsotg, GINTSTS));
                /*
                 * Note: jumps INTO the else branch below to share
                 * its error-recovery tail
                 */
                goto error;
            }
        }
    } else {
        dev_info(hsotg->dev,
             "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
             chan->hcint);
error:
        /* Failthrough: use 3-strikes rule */
        qtd->error_count++;
        dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
                      qtd, DWC2_HC_XFER_XACT_ERR);
        /*
         * We can get here after a completed transaction
         * (urb->actual_length >= urb->length) which was not reported
         * as completed. If that is the case, and we do not abort
         * the transfer, a transfer of size 0 will be enqueued
         * subsequently. If urb->actual_length is not DMA-aligned,
         * the buffer will then point to an unaligned address, and
         * the resulting behavior is undefined. Bail out in that
         * situation.
         */
        if (qtd->urb->actual_length >= qtd->urb->length)
            qtd->error_count = 3;
        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
    }
}
1996 
1997 /*
1998  * Handles a host channel Channel Halted interrupt
1999  *
2000  * In slave mode, this handler is called only when the driver specifically
2001  * requests a halt. This occurs during handling other host channel interrupts
2002  * (e.g. nak, xacterr, stall, nyet, etc.).
2003  *
2004  * In DMA mode, this is the interrupt that occurs when the core has finished
2005  * processing a transfer on a channel. Other host channel interrupts (except
2006  * ahberr) are disabled in DMA mode.
2007  */
2008 static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
2009                 struct dwc2_host_chan *chan, int chnum,
2010                 struct dwc2_qtd *qtd)
2011 {
2012     if (dbg_hc(chan))
2013         dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
2014              chnum);
2015 
2016     if (hsotg->params.host_dma) {
2017         dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
2018     } else {
2019         if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
2020             return;
2021         dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
2022     }
2023 }
2024 
2025 /*
2026  * Check if the given qtd is still the top of the list (and thus valid).
2027  *
2028  * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
2029  * the qtd from the top of the list, this will return false (otherwise true).
2030  */
2031 static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
2032 {
2033     struct dwc2_qtd *cur_head;
2034 
2035     if (!qh)
2036         return false;
2037 
2038     cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
2039                     qtd_list_entry);
2040     return (cur_head == qtd);
2041 }
2042 
2043 /* Handles interrupt for a specific Host Channel */
/* Handles interrupt for a specific Host Channel */
static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
    struct dwc2_qtd *qtd;
    struct dwc2_host_chan *chan;
    u32 hcint, hcintmsk;

    chan = hsotg->hc_ptr_array[chnum];

    hcint = dwc2_readl(hsotg, HCINT(chnum));
    hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
    if (!chan) {
        dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
        /* Ack (write-to-clear) so the interrupt does not refire forever */
        dwc2_writel(hsotg, hcint, HCINT(chnum));
        return;
    }

    if (dbg_hc(chan)) {
        dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
             chnum);
        dev_vdbg(hsotg->dev,
             "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
             hcint, hcintmsk, hcint & hcintmsk);
    }

    /* Acknowledge (write-to-clear) all status bits we read above */
    dwc2_writel(hsotg, hcint, HCINT(chnum));

    /*
     * If we got an interrupt after someone called
     * dwc2_hcd_endpoint_disable() we don't want to crash below
     */
    if (!chan->qh) {
        dev_warn(hsotg->dev, "Interrupt on disabled channel\n");
        return;
    }

    /* Latch raw status for the handlers; mask only for local dispatch */
    chan->hcint = hcint;
    hcint &= hcintmsk;

    /*
     * If the channel was halted due to a dequeue, the qtd list might
     * be empty or at least the first entry will not be the active qtd.
     * In this case, take a shortcut and just release the channel.
     */
    if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
        /*
         * If the channel was halted, this should be the only
         * interrupt unmasked
         */
        WARN_ON(hcint != HCINTMSK_CHHLTD);
        if (hsotg->params.dma_desc_enable)
            dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
                            chan->halt_status);
        else
            dwc2_release_channel(hsotg, chan, NULL,
                         chan->halt_status);
        return;
    }

    if (list_empty(&chan->qh->qtd_list)) {
        /*
         * TODO: Will this ever happen with the
         * DWC2_HC_XFER_URB_DEQUEUE handling above?
         */
        dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
            chnum);
        dev_dbg(hsotg->dev,
            "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
            chan->hcint, hcintmsk, hcint);
        chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
        disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
        chan->hcint = 0;
        return;
    }

    /* The active transfer descriptor is always the head of the list */
    qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
                   qtd_list_entry);

    /* Slave mode: ignore CHHLTD when other status bits accompany it */
    if (!hsotg->params.host_dma) {
        if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
            hcint &= ~HCINTMSK_CHHLTD;
    }

    if (hcint & HCINTMSK_XFERCOMPL) {
        dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
        /*
         * If NYET occurred at same time as Xfer Complete, the NYET is
         * handled by the Xfer Complete interrupt handler. Don't want
         * to call the NYET interrupt handler in this case.
         */
        hcint &= ~HCINTMSK_NYET;
    }

    /*
     * Each handler below may unlink and free the qtd, so re-validate
     * it after every call before passing it to the next handler
     */
    if (hcint & HCINTMSK_CHHLTD) {
        dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_AHBERR) {
        dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_STALL) {
        dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_NAK) {
        dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_ACK) {
        dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_NYET) {
        dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_XACTERR) {
        dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_BBLERR) {
        dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_FRMOVRUN) {
        dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }
    if (hcint & HCINTMSK_DATATGLERR) {
        dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
        if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
            goto exit;
    }

exit:
    chan->hcint = 0;
}
2190 
2191 /*
2192  * This interrupt indicates that one or more host channels has a pending
2193  * interrupt. There are multiple conditions that can cause each host channel
2194  * interrupt. This function determines which conditions have occurred for each
2195  * host channel interrupt and handles them appropriately.
2196  */
2197 static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2198 {
2199     u32 haint;
2200     int i;
2201     struct dwc2_host_chan *chan, *chan_tmp;
2202 
2203     haint = dwc2_readl(hsotg, HAINT);
2204     if (dbg_perio()) {
2205         dev_vdbg(hsotg->dev, "%s()\n", __func__);
2206 
2207         dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2208     }
2209 
2210     /*
2211      * According to USB 2.0 spec section 11.18.8, a host must
2212      * issue complete-split transactions in a microframe for a
2213      * set of full-/low-speed endpoints in the same relative
2214      * order as the start-splits were issued in a microframe for.
2215      */
2216     list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order,
2217                  split_order_list_entry) {
2218         int hc_num = chan->hc_num;
2219 
2220         if (haint & (1 << hc_num)) {
2221             dwc2_hc_n_intr(hsotg, hc_num);
2222             haint &= ~(1 << hc_num);
2223         }
2224     }
2225 
2226     for (i = 0; i < hsotg->params.host_channels; i++) {
2227         if (haint & (1 << i))
2228             dwc2_hc_n_intr(hsotg, i);
2229     }
2230 }
2231 
2232 /* This function handles interrupts for the HCD */
2233 irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2234 {
2235     u32 gintsts, dbg_gintsts;
2236     irqreturn_t retval = IRQ_NONE;
2237 
2238     if (!dwc2_is_controller_alive(hsotg)) {
2239         dev_warn(hsotg->dev, "Controller is dead\n");
2240         return retval;
2241     }
2242 
2243     spin_lock(&hsotg->lock);
2244 
2245     /* Check if HOST Mode */
2246     if (dwc2_is_host_mode(hsotg)) {
2247         gintsts = dwc2_read_core_intr(hsotg);
2248         if (!gintsts) {
2249             spin_unlock(&hsotg->lock);
2250             return retval;
2251         }
2252 
2253         retval = IRQ_HANDLED;
2254 
2255         dbg_gintsts = gintsts;
2256 #ifndef DEBUG_SOF
2257         dbg_gintsts &= ~GINTSTS_SOF;
2258 #endif
2259         if (!dbg_perio())
2260             dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2261                      GINTSTS_PTXFEMP);
2262 
2263         /* Only print if there are any non-suppressed interrupts left */
2264         if (dbg_gintsts)
2265             dev_vdbg(hsotg->dev,
2266                  "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2267                  gintsts);
2268 
2269         if (gintsts & GINTSTS_SOF)
2270             dwc2_sof_intr(hsotg);
2271         if (gintsts & GINTSTS_RXFLVL)
2272             dwc2_rx_fifo_level_intr(hsotg);
2273         if (gintsts & GINTSTS_NPTXFEMP)
2274             dwc2_np_tx_fifo_empty_intr(hsotg);
2275         if (gintsts & GINTSTS_PRTINT)
2276             dwc2_port_intr(hsotg);
2277         if (gintsts & GINTSTS_HCHINT)
2278             dwc2_hc_intr(hsotg);
2279         if (gintsts & GINTSTS_PTXFEMP)
2280             dwc2_perio_tx_fifo_empty_intr(hsotg);
2281 
2282         if (dbg_gintsts) {
2283             dev_vdbg(hsotg->dev,
2284                  "DWC OTG HCD Finished Servicing Interrupts\n");
2285             dev_vdbg(hsotg->dev,
2286                  "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2287                  dwc2_readl(hsotg, GINTSTS),
2288                  dwc2_readl(hsotg, GINTMSK));
2289         }
2290     }
2291 
2292     spin_unlock(&hsotg->lock);
2293 
2294     return retval;
2295 }