0001 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
0002 /*
0003  * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
0004  *
0005  * Copyright (C) 2004-2013 Synopsys, Inc.
0006  *
0007  * Redistribution and use in source and binary forms, with or without
0008  * modification, are permitted provided that the following conditions
0009  * are met:
0010  * 1. Redistributions of source code must retain the above copyright
0011  *    notice, this list of conditions, and the following disclaimer,
0012  *    without modification.
0013  * 2. Redistributions in binary form must reproduce the above copyright
0014  *    notice, this list of conditions and the following disclaimer in the
0015  *    documentation and/or other materials provided with the distribution.
0016  * 3. The names of the above-listed copyright holders may not be used
0017  *    to endorse or promote products derived from this software without
0018  *    specific prior written permission.
0019  *
0020  * ALTERNATIVELY, this software may be distributed under the terms of the
0021  * GNU General Public License ("GPL") as published by the Free Software
0022  * Foundation; either version 2 of the License, or (at your option) any
0023  * later version.
0024  *
0025  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
0026  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
0027  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
0028  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
0029  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
0030  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
0031  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
0032  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
0033  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
0034  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
0035  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0036  */
0037 
0038 /*
0039  * This file contains the Descriptor DMA implementation for Host mode
0040  */
0041 #include <linux/kernel.h>
0042 #include <linux/module.h>
0043 #include <linux/spinlock.h>
0044 #include <linux/interrupt.h>
0045 #include <linux/dma-mapping.h>
0046 #include <linux/io.h>
0047 #include <linux/slab.h>
0048 #include <linux/usb.h>
0049 
0050 #include <linux/usb/hcd.h>
0051 #include <linux/usb/ch11.h>
0052 
0053 #include "core.h"
0054 #include "hcd.h"
0055 
0056 static u16 dwc2_frame_list_idx(u16 frame)
0057 {
0058     return frame & (FRLISTEN_64_SIZE - 1);
0059 }
0060 
0061 static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
0062 {
0063     return (idx + inc) &
0064         ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
0065           MAX_DMA_DESC_NUM_GENERIC) - 1);
0066 }
0067 
0068 static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
0069 {
0070     return (idx - inc) &
0071         ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
0072           MAX_DMA_DESC_NUM_GENERIC) - 1);
0073 }
0074 
0075 static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
0076 {
0077     return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
0078         qh->dev_speed == USB_SPEED_HIGH) ?
0079         MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
0080 }
0081 
0082 static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
0083 {
0084     return qh->dev_speed == USB_SPEED_HIGH ?
0085            (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
0086 }
0087 
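/*
 * Worked example (illustrative, assuming FRLISTEN_64_SIZE is 64 and
 * MAX_DMA_DESC_NUM_GENERIC is 64): the index helpers above are plain
 * power-of-two modular arithmetic, e.g.
 *
 *        dwc2_frame_list_idx(71)  == 71 & 63 == 7
 *        dwc2_desclist_idx_inc(62, 4, USB_SPEED_FULL)
 *                                 == (62 + 4) & 63 == 2
 *
 * dwc2_frame_incr_val() converts the servicing period to frame-list steps:
 * for HS, qh->host_interval is in uframes, so it is divided (rounded up) by
 * the 8 uframes per frame; for FS/LS the interval is already in frames.
 */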
0088 static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
0089                 gfp_t flags)
0090 {
0091     struct kmem_cache *desc_cache;
0092 
0093     if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
0094         qh->dev_speed == USB_SPEED_HIGH)
0095         desc_cache = hsotg->desc_hsisoc_cache;
0096     else
0097         desc_cache = hsotg->desc_gen_cache;
0098 
0099     qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
0100                         dwc2_max_desc_num(qh);
0101 
0102     qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
0103     if (!qh->desc_list)
0104         return -ENOMEM;
0105 
0106     qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
0107                        qh->desc_list_sz,
0108                        DMA_TO_DEVICE);
0109 
0110     qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
0111     if (!qh->n_bytes) {
0112         dma_unmap_single(hsotg->dev, qh->desc_list_dma,
0113                  qh->desc_list_sz,
0114                  DMA_TO_DEVICE);
0115         kmem_cache_free(desc_cache, qh->desc_list);
0116         qh->desc_list = NULL;
0117         return -ENOMEM;
0118     }
0119 
0120     return 0;
0121 }
0122 
0123 static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
0124 {
0125     struct kmem_cache *desc_cache;
0126 
0127     if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
0128         qh->dev_speed == USB_SPEED_HIGH)
0129         desc_cache = hsotg->desc_hsisoc_cache;
0130     else
0131         desc_cache = hsotg->desc_gen_cache;
0132 
0133     if (qh->desc_list) {
0134         dma_unmap_single(hsotg->dev, qh->desc_list_dma,
0135                  qh->desc_list_sz, DMA_TO_DEVICE);
0136         kmem_cache_free(desc_cache, qh->desc_list);
0137         qh->desc_list = NULL;
0138     }
0139 
0140     kfree(qh->n_bytes);
0141     qh->n_bytes = NULL;
0142 }
0143 
0144 static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
0145 {
0146     if (hsotg->frame_list)
0147         return 0;
0148 
0149     hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
0150     hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
0151     if (!hsotg->frame_list)
0152         return -ENOMEM;
0153 
0154     hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
0155                            hsotg->frame_list_sz,
0156                            DMA_TO_DEVICE);
0157 
0158     return 0;
0159 }
0160 
0161 static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
0162 {
0163     unsigned long flags;
0164 
0165     spin_lock_irqsave(&hsotg->lock, flags);
0166 
0167     if (!hsotg->frame_list) {
0168         spin_unlock_irqrestore(&hsotg->lock, flags);
0169         return;
0170     }
0171 
0172     dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
0173              hsotg->frame_list_sz, DMA_TO_DEVICE);
0174 
0175     kfree(hsotg->frame_list);
0176     hsotg->frame_list = NULL;
0177 
0178     spin_unlock_irqrestore(&hsotg->lock, flags);
0179 }
0180 
0181 static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
0182 {
0183     u32 hcfg;
0184     unsigned long flags;
0185 
0186     spin_lock_irqsave(&hsotg->lock, flags);
0187 
0188     hcfg = dwc2_readl(hsotg, HCFG);
0189     if (hcfg & HCFG_PERSCHEDENA) {
0190         /* already enabled */
0191         spin_unlock_irqrestore(&hsotg->lock, flags);
0192         return;
0193     }
0194 
0195     dwc2_writel(hsotg, hsotg->frame_list_dma, HFLBADDR);
0196 
0197     hcfg &= ~HCFG_FRLISTEN_MASK;
0198     hcfg |= fr_list_en | HCFG_PERSCHEDENA;
0199     dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
0200     dwc2_writel(hsotg, hcfg, HCFG);
0201 
0202     spin_unlock_irqrestore(&hsotg->lock, flags);
0203 }
0204 
0205 static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
0206 {
0207     u32 hcfg;
0208     unsigned long flags;
0209 
0210     spin_lock_irqsave(&hsotg->lock, flags);
0211 
0212     hcfg = dwc2_readl(hsotg, HCFG);
0213     if (!(hcfg & HCFG_PERSCHEDENA)) {
0214         /* already disabled */
0215         spin_unlock_irqrestore(&hsotg->lock, flags);
0216         return;
0217     }
0218 
0219     hcfg &= ~HCFG_PERSCHEDENA;
0220     dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
0221     dwc2_writel(hsotg, hcfg, HCFG);
0222 
0223     spin_unlock_irqrestore(&hsotg->lock, flags);
0224 }
0225 
0226 /*
0227  * Activates/Deactivates FrameList entries for the channel based on endpoint
0228  * servicing period
0229  */
0230 static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
0231                    int enable)
0232 {
0233     struct dwc2_host_chan *chan;
0234     u16 i, j, inc;
0235 
0236     if (!hsotg) {
0237         pr_err("hsotg = %p\n", hsotg);
0238         return;
0239     }
0240 
0241     if (!qh->channel) {
0242         dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
0243         return;
0244     }
0245 
0246     if (!hsotg->frame_list) {
0247         dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
0248             hsotg->frame_list);
0249         return;
0250     }
0251 
0252     chan = qh->channel;
0253     inc = dwc2_frame_incr_val(qh);
0254     if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
0255         i = dwc2_frame_list_idx(qh->next_active_frame);
0256     else
0257         i = 0;
0258 
0259     j = i;
0260     do {
0261         if (enable)
0262             hsotg->frame_list[j] |= 1 << chan->hc_num;
0263         else
0264             hsotg->frame_list[j] &= ~(1 << chan->hc_num);
0265         j = (j + inc) & (FRLISTEN_64_SIZE - 1);
0266     } while (j != i);
0267 
0268     /*
0269      * Sync frame list since controller will access it if periodic
0270      * channel is currently enabled.
0271      */
0272     dma_sync_single_for_device(hsotg->dev,
0273                    hsotg->frame_list_dma,
0274                    hsotg->frame_list_sz,
0275                    DMA_TO_DEVICE);
0276 
0277     if (!enable)
0278         return;
0279 
0280     chan->schinfo = 0;
0281     if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
0282         j = 1;
0283         /* TODO - check this */
0284         inc = (8 + qh->host_interval - 1) / qh->host_interval;
0285         for (i = 0; i < inc; i++) {
0286             chan->schinfo |= j;
0287             j = j << qh->host_interval;
0288         }
0289     } else {
0290         chan->schinfo = 0xff;
0291     }
0292 }
0293 
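/*
 * Worked example (illustrative, with assumed values): for a HS isoc endpoint
 * on host channel 3 with qh->host_interval = 16 uframes,
 * dwc2_frame_incr_val() is 2, so dwc2_update_frame_list() sets bit 3 in
 * every second frame_list entry, starting at
 * dwc2_frame_list_idx(qh->next_active_frame) and wrapping over all
 * FRLISTEN_64_SIZE entries.  The per-frame schinfo mask is built similarly:
 * with host_interval = 2 uframes, inc = (8 + 2 - 1) / 2 = 4, so bits 0, 2, 4
 * and 6 are set and schinfo becomes 0x55; non-HS channels (and zero-interval
 * cases) simply use 0xff.
 */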
0294 static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
0295                       struct dwc2_qh *qh)
0296 {
0297     struct dwc2_host_chan *chan = qh->channel;
0298 
0299     if (dwc2_qh_is_non_per(qh)) {
0300         if (hsotg->params.uframe_sched)
0301             hsotg->available_host_channels++;
0302         else
0303             hsotg->non_periodic_channels--;
0304     } else {
0305         dwc2_update_frame_list(hsotg, qh, 0);
0306         hsotg->available_host_channels++;
0307     }
0308 
0309     /*
0310      * The condition is added to prevent a double cleanup attempt in case
0311      * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
0312      */
0313     if (chan->qh) {
0314         if (!list_empty(&chan->hc_list_entry))
0315             list_del(&chan->hc_list_entry);
0316         dwc2_hc_cleanup(hsotg, chan);
0317         list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
0318         chan->qh = NULL;
0319     }
0320 
0321     qh->channel = NULL;
0322     qh->ntd = 0;
0323 
0324     if (qh->desc_list)
0325         memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
0326                dwc2_max_desc_num(qh));
0327 }
0328 
0329 /**
0330  * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
0331  * related members
0332  *
0333  * @hsotg: The HCD state structure for the DWC OTG controller
0334  * @qh:    The QH to init
0335  * @mem_flags: Indicates the type of memory allocation
0336  *
0337  * Return: 0 if successful, negative error code otherwise
0338  *
0339  * Allocates memory for the descriptor list. For the first periodic QH,
0340  * allocates memory for the FrameList and enables periodic scheduling.
0341  */
0342 int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
0343               gfp_t mem_flags)
0344 {
0345     int retval;
0346 
0347     if (qh->do_split) {
0348         dev_err(hsotg->dev,
0349             "SPLIT Transfers are not supported in Descriptor DMA mode.\n");
0350         retval = -EINVAL;
0351         goto err0;
0352     }
0353 
0354     retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
0355     if (retval)
0356         goto err0;
0357 
0358     if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
0359         qh->ep_type == USB_ENDPOINT_XFER_INT) {
0360         if (!hsotg->frame_list) {
0361             retval = dwc2_frame_list_alloc(hsotg, mem_flags);
0362             if (retval)
0363                 goto err1;
0364             /* Enable periodic schedule on first periodic QH */
0365             dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
0366         }
0367     }
0368 
0369     qh->ntd = 0;
0370     return 0;
0371 
0372 err1:
0373     dwc2_desc_list_free(hsotg, qh);
0374 err0:
0375     return retval;
0376 }
0377 
0378 /**
0379  * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
0380  * members
0381  *
0382  * @hsotg: The HCD state structure for the DWC OTG controller
0383  * @qh:    The QH to free
0384  *
0385  * Frees descriptor list memory associated with the QH. If the QH is periodic
0386  * and is the last one, frees FrameList memory and disables periodic scheduling.
0387  */
0388 void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
0389 {
0390     unsigned long flags;
0391 
0392     dwc2_desc_list_free(hsotg, qh);
0393 
0394     /*
0395      * The channel may still be assigned in some cases. This has been
0396      * seen on Isoc URB dequeue: the channel was halted but no subsequent
0397      * ChHalted interrupt arrived to release it. Later, when this function
0398      * is reached from the endpoint disable routine, the channel remains
0399      * assigned.
0400      */
0401     spin_lock_irqsave(&hsotg->lock, flags);
0402     if (qh->channel)
0403         dwc2_release_channel_ddma(hsotg, qh);
0404     spin_unlock_irqrestore(&hsotg->lock, flags);
0405 
0406     if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
0407          qh->ep_type == USB_ENDPOINT_XFER_INT) &&
0408         (hsotg->params.uframe_sched ||
0409          !hsotg->periodic_channels) && hsotg->frame_list) {
0410         dwc2_per_sched_disable(hsotg);
0411         dwc2_frame_list_free(hsotg);
0412     }
0413 }
0414 
0415 static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
0416 {
0417     if (qh->dev_speed == USB_SPEED_HIGH)
0418         /* Descriptor set (8 descriptors) index which is 8-aligned */
0419         return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
0420     else
0421         return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
0422 }
0423 
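/*
 * Worked example, assuming MAX_DMA_DESC_NUM_HS_ISOC is 256 and
 * MAX_DMA_DESC_NUM_GENERIC is 64: in HS each frame owns a set of 8
 * descriptors (one per uframe), so frame index 33 maps to descriptor
 * (33 & 31) * 8 = 8 and the mapping wraps every 32 frames.  In FS there is
 * one descriptor per frame, so frame index 70 maps to descriptor 70 & 63 = 6.
 */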
0424 /*
0425  * Determine starting frame for Isochronous transfer.
0426  * A few frames are skipped to prevent a race condition with the HC.
0427  */
0428 static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
0429                     struct dwc2_qh *qh, u16 *skip_frames)
0430 {
0431     u16 frame;
0432 
0433     hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
0434 
0435     /*
0436      * next_active_frame is always frame number (not uFrame) both in FS
0437      * and HS!
0438      */
0439 
0440     /*
0441      * skip_frames is used to limit the number of activated descriptors,
0442      * to avoid the situation where the HC services the last activated
0443      * descriptor first.
0444      * Example for FS:
0445      * Current frame is 1, scheduled frame is 3. Since HC always fetches
0446      * the descriptor corresponding to curr_frame+1, the descriptor
0447      * corresponding to frame 2 will be fetched. If the number of
0448      * descriptors is max=64 (or greater), the list will be fully
0449      * programmed with Active descriptors and it is possible (though rare)
0450      * that the last descriptor (considering rollback) corresponding to
0451      * frame 2 will be serviced first. The HS case is more probable
0452      * because, in fact, up to 11 uframes (16 in the code) may be skipped.
0453      */
0454     if (qh->dev_speed == USB_SPEED_HIGH) {
0455         /*
0456          * Also consider the uframe counter, to start the transfer asap.
0457          * If half of the frame has elapsed, skip 2 frames, otherwise just
0458          * 1 frame. The starting descriptor index must be 8-aligned, so if
0459          * the current frame is nearly complete, the next one is skipped as
0460          * well.
0461          */
0462         if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
0463             *skip_frames = 2 * 8;
0464             frame = dwc2_frame_num_inc(hsotg->frame_number,
0465                            *skip_frames);
0466         } else {
0467             *skip_frames = 1 * 8;
0468             frame = dwc2_frame_num_inc(hsotg->frame_number,
0469                            *skip_frames);
0470         }
0471 
0472         frame = dwc2_full_frame_num(frame);
0473     } else {
0474         /*
0475          * Two frames are skipped for FS - the current and the next.
0476          * But for descriptor programming, 1 frame (descriptor) is
0477          * enough, see example above.
0478          */
0479         *skip_frames = 1;
0480         frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
0481     }
0482 
0483     return frame;
0484 }
0485 
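/*
 * Worked example (illustrative): if the controller is currently in uframe 6
 * of a frame, dwc2_micro_frame_num() >= 5, so 2 * 8 = 16 uframes are
 * skipped; in uframe 2 only 1 * 8 = 8 would be skipped.  Either way
 * dwc2_full_frame_num() then strips the uframe part so the start stays
 * frame-aligned, i.e. 8-aligned in descriptor terms, as
 * dwc2_frame_to_desc_idx() above expects.
 */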
0486 /*
0487  * Calculate initial descriptor index for isochronous transfer based on
0488  * scheduled frame
0489  */
0490 static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
0491                     struct dwc2_qh *qh)
0492 {
0493     u16 frame, fr_idx, fr_idx_tmp, skip_frames;
0494 
0495     /*
0496      * With the current ISOC processing algorithm the channel is released
0497      * when there are no more QTDs in the list (qh->ntd == 0). Thus this
0498      * function is called only when qh->ntd == 0 and qh->channel == NULL.
0499      *
0500      * So the qh->channel != NULL branch is not used; it has simply not
0501      * been removed from the source file. It is needed for another possible
0502      * approach: do not disable and release the channel when the ISOC
0503      * session completes, just move the QH to the inactive schedule until a
0504      * new QTD arrives. On a new QTD the QH moves back to the 'ready'
0505      * schedule, and the starting frame and hence desc_index are
0506      * recalculated. In that case the channel is released only on ep_disable.
0507      */
0508 
0509     /*
0510      * Calculate starting descriptor index. For INTERRUPT endpoint it is
0511      * always 0.
0512      */
0513     if (qh->channel) {
0514         frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
0515         /*
0516          * Calculate initial descriptor index based on FrameList current
0517          * bitmap and servicing period
0518          */
0519         fr_idx_tmp = dwc2_frame_list_idx(frame);
0520         fr_idx = (FRLISTEN_64_SIZE +
0521               dwc2_frame_list_idx(qh->next_active_frame) -
0522               fr_idx_tmp) % dwc2_frame_incr_val(qh);
0523         fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
0524     } else {
0525         qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
0526                                &skip_frames);
0527         fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
0528     }
0529 
0530     qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
0531 
0532     return skip_frames;
0533 }
0534 
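/*
 * Worked example for the qh->channel branch, assuming FRLISTEN_64_SIZE is
 * 64: take an endpoint with a servicing period of 4 frames whose frame_list
 * slots are 10, 14, 18, 22, ..., and a newly calculated starting frame that
 * maps to list index 25.  The phase correction is (64 + 10 - 25) % 4 = 1, so
 * fr_idx = (1 + 25) % 64 = 26, the first slot at or after the starting frame
 * that belongs to this endpoint's schedule.  dwc2_frame_to_desc_idx() then
 * turns that into the initial td_first/td_last.
 */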
0535 #define ISOC_URB_GIVEBACK_ASAP
0536 
0537 #define MAX_ISOC_XFER_SIZE_FS   1023
0538 #define MAX_ISOC_XFER_SIZE_HS   3072
0539 #define DESCNUM_THRESHOLD   4
0540 
0541 static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
0542                      struct dwc2_qtd *qtd,
0543                      struct dwc2_qh *qh, u32 max_xfer_size,
0544                      u16 idx)
0545 {
0546     struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
0547     struct dwc2_hcd_iso_packet_desc *frame_desc;
0548 
0549     memset(dma_desc, 0, sizeof(*dma_desc));
0550     frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
0551 
0552     if (frame_desc->length > max_xfer_size)
0553         qh->n_bytes[idx] = max_xfer_size;
0554     else
0555         qh->n_bytes[idx] = frame_desc->length;
0556 
0557     dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
0558     dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
0559                HOST_DMA_ISOC_NBYTES_MASK;
0560 
0561     /* Set active bit */
0562     dma_desc->status |= HOST_DMA_A;
0563 
0564     qh->ntd++;
0565     qtd->isoc_frame_index_last++;
0566 
0567 #ifdef ISOC_URB_GIVEBACK_ASAP
0568     /* Set IOC for each descriptor corresponding to last frame of URB */
0569     if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
0570         dma_desc->status |= HOST_DMA_IOC;
0571 #endif
0572 
0573     dma_sync_single_for_device(hsotg->dev,
0574                    qh->desc_list_dma +
0575             (idx * sizeof(struct dwc2_dma_desc)),
0576             sizeof(struct dwc2_dma_desc),
0577             DMA_TO_DEVICE);
0578 }
0579 
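/*
 * Worked example (illustrative): for a FS isoc OUT packet of 192 bytes (well
 * under MAX_ISOC_XFER_SIZE_FS) the descriptor's isoc NBYTES field is
 * programmed with 192, buf points at urb->dma + frame_desc->offset, and the
 * Active bit is set.  With ISOC_URB_GIVEBACK_ASAP defined, only the
 * descriptor covering the URB's last packet also gets IOC, so completion is
 * signalled once per URB rather than once per descriptor.
 */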
0580 static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
0581                     struct dwc2_qh *qh, u16 skip_frames)
0582 {
0583     struct dwc2_qtd *qtd;
0584     u32 max_xfer_size;
0585     u16 idx, inc, n_desc = 0, ntd_max = 0;
0586     u16 cur_idx;
0587     u16 next_idx;
0588 
0589     idx = qh->td_last;
0590     inc = qh->host_interval;
0591     hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
0592     cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
0593     next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
0594 
0595     /*
0596      * Ensure the current frame number hasn't overstepped the last
0597      * scheduled descriptor. If it has, the only way to recover is to move
0598      * qh->td_last to the current frame number + 1, so that the next isoc
0599      * descriptor will be scheduled on frame number + 1 and not on a past
0600      * frame.
0601      */
0602     if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
0603         if (inc < 32) {
0604             dev_vdbg(hsotg->dev,
0605                  "current frame number overstep last descriptor\n");
0606             qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
0607                                 qh->dev_speed);
0608             idx = qh->td_last;
0609         }
0610     }
0611 
0612     if (qh->host_interval) {
0613         ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
0614                 qh->host_interval;
0615         if (skip_frames && !qh->channel)
0616             ntd_max -= skip_frames / qh->host_interval;
0617     }
0618 
0619     max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
0620             MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
0621 
0622     list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
0623         if (qtd->in_process &&
0624             qtd->isoc_frame_index_last ==
0625             qtd->urb->packet_count)
0626             continue;
0627 
0628         qtd->isoc_td_first = idx;
0629         while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
0630                         qtd->urb->packet_count) {
0631             dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
0632                              max_xfer_size, idx);
0633             idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
0634             n_desc++;
0635         }
0636         qtd->isoc_td_last = idx;
0637         qtd->in_process = 1;
0638     }
0639 
0640     qh->td_last = idx;
0641 
0642 #ifdef ISOC_URB_GIVEBACK_ASAP
0643     /* Set IOC for last descriptor if descriptor list is full */
0644     if (qh->ntd == ntd_max) {
0645         idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
0646         qh->desc_list[idx].status |= HOST_DMA_IOC;
0647         dma_sync_single_for_device(hsotg->dev,
0648                        qh->desc_list_dma + (idx *
0649                        sizeof(struct dwc2_dma_desc)),
0650                        sizeof(struct dwc2_dma_desc),
0651                        DMA_TO_DEVICE);
0652     }
0653 #else
0654     /*
0655      * Set IOC bit only for one descriptor. Always try to be ahead of HW
0656      * processing, i.e. on IOC generation driver activates next descriptor
0657      * but core continues to process descriptors following the one with IOC
0658      * set.
0659      */
0660 
0661     if (n_desc > DESCNUM_THRESHOLD)
0662         /*
0663          * Move IOC "up". Required even if there is only one QTD
0664          * in the list, because QTDs might continue to be queued,
0665          * but only one was queued at activation time.
0666          * Actually more than one QTD might be in the list if this
0667          * function is called from XferCompletion - QTDs were queued
0668          * during HW processing of the previous descriptor chunk.
0669          */
0670         idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
0671                         qh->dev_speed);
0672     else
0673         /*
0674          * Set the IOC for the latest descriptor if either the number of
0675          * descriptors is not greater than the threshold or no more new
0676          * descriptors were activated
0677          */
0678         idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
0679 
0680     qh->desc_list[idx].status |= HOST_DMA_IOC;
0681     dma_sync_single_for_device(hsotg->dev,
0682                    qh->desc_list_dma +
0683                    (idx * sizeof(struct dwc2_dma_desc)),
0684                    sizeof(struct dwc2_dma_desc),
0685                    DMA_TO_DEVICE);
0686 #endif
0687 }
0688 
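/*
 * Worked example, assuming MAX_DMA_DESC_NUM_HS_ISOC is 256: for a HS isoc
 * endpoint with qh->host_interval = 8 uframes, at most
 * ntd_max = (256 + 8 - 1) / 8 = 32 descriptors are kept active at once, one
 * every 8 list entries.  If dwc2_calc_starting_frame() skipped 16 uframes
 * and no channel was previously assigned, ntd_max drops by 16 / 8 = 2 so the
 * skipped slots are not programmed.
 */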
0689 static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
0690                     struct dwc2_host_chan *chan,
0691                     struct dwc2_qtd *qtd, struct dwc2_qh *qh,
0692                     int n_desc)
0693 {
0694     struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
0695     int len = chan->xfer_len;
0696 
0697     if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
0698         len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
0699 
0700     if (chan->ep_is_in) {
0701         int num_packets;
0702 
0703         if (len > 0 && chan->max_packet)
0704             num_packets = (len + chan->max_packet - 1)
0705                     / chan->max_packet;
0706         else
0707             /* Need 1 packet for transfer length of 0 */
0708             num_packets = 1;
0709 
0710         /* Always program an integral # of packets for IN transfers */
0711         len = num_packets * chan->max_packet;
0712     }
0713 
0714     dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
0715     qh->n_bytes[n_desc] = len;
0716 
0717     if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
0718         qtd->control_phase == DWC2_CONTROL_SETUP)
0719         dma_desc->status |= HOST_DMA_SUP;
0720 
0721     dma_desc->buf = (u32)chan->xfer_dma;
0722 
0723     dma_sync_single_for_device(hsotg->dev,
0724                    qh->desc_list_dma +
0725                    (n_desc * sizeof(struct dwc2_dma_desc)),
0726                    sizeof(struct dwc2_dma_desc),
0727                    DMA_TO_DEVICE);
0728 
0729     /*
0730      * Last (or only) descriptor of IN transfer with actual size less
0731      * than MaxPacket
0732      */
0733     if (len > chan->xfer_len) {
0734         chan->xfer_len = 0;
0735     } else {
0736         chan->xfer_dma += len;
0737         chan->xfer_len -= len;
0738     }
0739 }
0740 
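/*
 * Worked example with assumed values: for an IN endpoint with
 * chan->max_packet = 64 and chan->xfer_len = 1000, num_packets is
 * DIV_ROUND_UP(1000, 64) = 16, so the descriptor is programmed for
 * 16 * 64 = 1024 bytes, an integral number of packets as required for IN.
 * Because 1024 > 1000 this is treated as the last descriptor of the transfer
 * and chan->xfer_len is set to 0; the real short-packet length is recovered
 * later from the NBYTES "remain" field in
 * dwc2_update_non_isoc_urb_state_ddma().
 */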
0741 static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
0742                     struct dwc2_qh *qh)
0743 {
0744     struct dwc2_qtd *qtd;
0745     struct dwc2_host_chan *chan = qh->channel;
0746     int n_desc = 0;
0747 
0748     dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
0749          (unsigned long)chan->xfer_dma, chan->xfer_len);
0750 
0751     /*
0752      * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
0753      * if SG transfer consists of multiple URBs, this pointer is re-assigned
0754      * to the buffer of the currently processed QTD. For non-SG request
0755      * there is always one QTD active.
0756      */
0757 
0758     list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
0759         dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
0760 
0761         if (n_desc) {
0762             /* SG request - more than 1 QTD */
0763             chan->xfer_dma = qtd->urb->dma +
0764                     qtd->urb->actual_length;
0765             chan->xfer_len = qtd->urb->length -
0766                     qtd->urb->actual_length;
0767             dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
0768                  (unsigned long)chan->xfer_dma, chan->xfer_len);
0769         }
0770 
0771         qtd->n_desc = 0;
0772         do {
0773             if (n_desc > 1) {
0774                 qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
0775                 dev_vdbg(hsotg->dev,
0776                      "set A bit in desc %d (%p)\n",
0777                      n_desc - 1,
0778                      &qh->desc_list[n_desc - 1]);
0779                 dma_sync_single_for_device(hsotg->dev,
0780                                qh->desc_list_dma +
0781                     ((n_desc - 1) *
0782                     sizeof(struct dwc2_dma_desc)),
0783                     sizeof(struct dwc2_dma_desc),
0784                     DMA_TO_DEVICE);
0785             }
0786             dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
0787             dev_vdbg(hsotg->dev,
0788                  "desc %d (%p) buf=%08x status=%08x\n",
0789                  n_desc, &qh->desc_list[n_desc],
0790                  qh->desc_list[n_desc].buf,
0791                  qh->desc_list[n_desc].status);
0792             qtd->n_desc++;
0793             n_desc++;
0794         } while (chan->xfer_len > 0 &&
0795              n_desc != MAX_DMA_DESC_NUM_GENERIC);
0796 
0797         dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
0798         qtd->in_process = 1;
0799         if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
0800             break;
0801         if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
0802             break;
0803     }
0804 
0805     if (n_desc) {
0806         qh->desc_list[n_desc - 1].status |=
0807                 HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
0808         dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
0809              n_desc - 1, &qh->desc_list[n_desc - 1]);
0810         dma_sync_single_for_device(hsotg->dev,
0811                        qh->desc_list_dma + (n_desc - 1) *
0812                        sizeof(struct dwc2_dma_desc),
0813                        sizeof(struct dwc2_dma_desc),
0814                        DMA_TO_DEVICE);
0815         if (n_desc > 1) {
0816             qh->desc_list[0].status |= HOST_DMA_A;
0817             dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
0818                  &qh->desc_list[0]);
0819             dma_sync_single_for_device(hsotg->dev,
0820                            qh->desc_list_dma,
0821                     sizeof(struct dwc2_dma_desc),
0822                     DMA_TO_DEVICE);
0823         }
0824         chan->ntd = n_desc;
0825     }
0826 }
0827 
0828 /**
0829  * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
0830  *
0831  * @hsotg: The HCD state structure for the DWC OTG controller
0832  * @qh:    The QH to init
0833  *
0834  * Return: 0 if successful, negative error code otherwise
0835  *
0836  * For Control and Bulk endpoints, initializes descriptor list and starts the
0837  * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
0838  * list then updates FrameList, marking appropriate entries as active.
0839  *
0840  * For Isochronous endpoints the starting descriptor index is calculated based
0841  * on the scheduled frame, but only on the first transfer descriptor within a
0842  * session. Then the transfer is started via enabling the channel.
0843  *
0844  * For Isochronous endpoints the channel is not halted on XferComplete, so it
0845  * remains assigned to the endpoint (QH) until the session is done.
0846  */
0847 void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
0848 {
0849     /* Channel is already assigned */
0850     struct dwc2_host_chan *chan = qh->channel;
0851     u16 skip_frames = 0;
0852 
0853     switch (chan->ep_type) {
0854     case USB_ENDPOINT_XFER_CONTROL:
0855     case USB_ENDPOINT_XFER_BULK:
0856         dwc2_init_non_isoc_dma_desc(hsotg, qh);
0857         dwc2_hc_start_transfer_ddma(hsotg, chan);
0858         break;
0859     case USB_ENDPOINT_XFER_INT:
0860         dwc2_init_non_isoc_dma_desc(hsotg, qh);
0861         dwc2_update_frame_list(hsotg, qh, 1);
0862         dwc2_hc_start_transfer_ddma(hsotg, chan);
0863         break;
0864     case USB_ENDPOINT_XFER_ISOC:
0865         if (!qh->ntd)
0866             skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
0867         dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
0868 
0869         if (!chan->xfer_started) {
0870             dwc2_update_frame_list(hsotg, qh, 1);
0871 
0872             /*
0873              * Always set to max, instead of the actual size. Otherwise
0874              * ntd would be changed while the channel is being enabled,
0875              * which is not recommended.
0876              */
0877             chan->ntd = dwc2_max_desc_num(qh);
0878 
0879             /* Enable channel only once for ISOC */
0880             dwc2_hc_start_transfer_ddma(hsotg, chan);
0881         }
0882 
0883         break;
0884     default:
0885         break;
0886     }
0887 }
0888 
0889 #define DWC2_CMPL_DONE      1
0890 #define DWC2_CMPL_STOP      2
0891 
0892 static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
0893                     struct dwc2_host_chan *chan,
0894                     struct dwc2_qtd *qtd,
0895                     struct dwc2_qh *qh, u16 idx)
0896 {
0897     struct dwc2_dma_desc *dma_desc;
0898     struct dwc2_hcd_iso_packet_desc *frame_desc;
0899     u16 remain = 0;
0900     int rc = 0;
0901 
0902     if (!qtd->urb)
0903         return -EINVAL;
0904 
0905     dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
0906                 sizeof(struct dwc2_dma_desc)),
0907                 sizeof(struct dwc2_dma_desc),
0908                 DMA_FROM_DEVICE);
0909 
0910     dma_desc = &qh->desc_list[idx];
0911 
0912     frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
0913     dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
0914     if (chan->ep_is_in)
0915         remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
0916              HOST_DMA_ISOC_NBYTES_SHIFT;
0917 
0918     if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
0919         /*
0920          * XactError, or unable to complete all the transactions
0921          * in the scheduled micro-frame/frame, both indicated by
0922          * HOST_DMA_STS_PKTERR
0923          */
0924         qtd->urb->error_count++;
0925         frame_desc->actual_length = qh->n_bytes[idx] - remain;
0926         frame_desc->status = -EPROTO;
0927     } else {
0928         /* Success */
0929         frame_desc->actual_length = qh->n_bytes[idx] - remain;
0930         frame_desc->status = 0;
0931     }
0932 
0933     if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
0934         /*
0935          * urb->status is not used for isoc transfers here. The
0936          * individual frame_desc statuses are used instead.
0937          */
0938         dwc2_host_complete(hsotg, qtd, 0);
0939         dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
0940 
0941         /*
0942          * This check is necessary because urb_dequeue can be called
0943          * from urb complete callback (sound driver for example). All
0944          * pending URBs are dequeued there, so no need for further
0945          * processing.
0946          */
0947         if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
0948             return -1;
0949         rc = DWC2_CMPL_DONE;
0950     }
0951 
0952     qh->ntd--;
0953 
0954     /* Stop if IOC requested descriptor reached */
0955     if (dma_desc->status & HOST_DMA_IOC)
0956         rc = DWC2_CMPL_STOP;
0957 
0958     return rc;
0959 }
0960 
0961 static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
0962                      struct dwc2_host_chan *chan,
0963                      enum dwc2_halt_status halt_status)
0964 {
0965     struct dwc2_hcd_iso_packet_desc *frame_desc;
0966     struct dwc2_qtd *qtd, *qtd_tmp;
0967     struct dwc2_qh *qh;
0968     u16 idx;
0969     int rc;
0970 
0971     qh = chan->qh;
0972     idx = qh->td_first;
0973 
0974     if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
0975         list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
0976             qtd->in_process = 0;
0977         return;
0978     }
0979 
0980     if (halt_status == DWC2_HC_XFER_AHB_ERR ||
0981         halt_status == DWC2_HC_XFER_BABBLE_ERR) {
0982         /*
0983          * The channel is halted in these error cases, which are
0984          * considered serious issues.
0985          * Complete all URBs marking all frames as failed, irrespective of
0986          * whether some of the descriptors (frames) succeeded or not.
0987          * Pass the error code to the completion routine as well, to update
0988          * urb->status, as some class drivers might use it to stop
0989          * queuing transfer requests.
0990          */
0991         int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
0992               -EIO : -EOVERFLOW;
0993 
0994         list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
0995                      qtd_list_entry) {
0996             if (qtd->urb) {
0997                 for (idx = 0; idx < qtd->urb->packet_count;
0998                      idx++) {
0999                     frame_desc = &qtd->urb->iso_descs[idx];
1000                     frame_desc->status = err;
1001                 }
1002 
1003                 dwc2_host_complete(hsotg, qtd, err);
1004             }
1005 
1006             dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1007         }
1008 
1009         return;
1010     }
1011 
1012     list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
1013         if (!qtd->in_process)
1014             break;
1015 
1016         /*
1017          * Ensure idx corresponds to descriptor where first urb of this
1018          * qtd was added. In fact, during isoc desc init, dwc2 may skip
1019          * an index if current frame number is already over this index.
1020          */
1021         if (idx != qtd->isoc_td_first) {
1022             dev_vdbg(hsotg->dev,
1023                  "try to complete %d instead of %d\n",
1024                  idx, qtd->isoc_td_first);
1025             idx = qtd->isoc_td_first;
1026         }
1027 
1028         do {
1029             struct dwc2_qtd *qtd_next;
1030             u16 cur_idx;
1031 
1032             rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
1033                               idx);
1034             if (rc < 0)
1035                 return;
1036             idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
1037                             chan->speed);
1038             if (!rc)
1039                 continue;
1040 
1041             if (rc == DWC2_CMPL_DONE)
1042                 break;
1043 
1044             /* rc == DWC2_CMPL_STOP */
1045 
1046             if (qh->host_interval >= 32)
1047                 goto stop_scan;
1048 
1049             qh->td_first = idx;
1050             cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
1051             qtd_next = list_first_entry(&qh->qtd_list,
1052                             struct dwc2_qtd,
1053                             qtd_list_entry);
1054             if (dwc2_frame_idx_num_gt(cur_idx,
1055                           qtd_next->isoc_td_last))
1056                 break;
1057 
1058             goto stop_scan;
1059 
1060         } while (idx != qh->td_first);
1061     }
1062 
1063 stop_scan:
1064     qh->td_first = idx;
1065 }
1066 
1067 static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
1068                            struct dwc2_host_chan *chan,
1069                     struct dwc2_qtd *qtd,
1070                     struct dwc2_dma_desc *dma_desc,
1071                     enum dwc2_halt_status halt_status,
1072                     u32 n_bytes, int *xfer_done)
1073 {
1074     struct dwc2_hcd_urb *urb = qtd->urb;
1075     u16 remain = 0;
1076 
1077     if (chan->ep_is_in)
1078         remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
1079              HOST_DMA_NBYTES_SHIFT;
1080 
1081     dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
1082 
1083     if (halt_status == DWC2_HC_XFER_AHB_ERR) {
1084         dev_err(hsotg->dev, "EIO\n");
1085         urb->status = -EIO;
1086         return 1;
1087     }
1088 
1089     if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
1090         switch (halt_status) {
1091         case DWC2_HC_XFER_STALL:
1092             dev_vdbg(hsotg->dev, "Stall\n");
1093             urb->status = -EPIPE;
1094             break;
1095         case DWC2_HC_XFER_BABBLE_ERR:
1096             dev_err(hsotg->dev, "Babble\n");
1097             urb->status = -EOVERFLOW;
1098             break;
1099         case DWC2_HC_XFER_XACT_ERR:
1100             dev_err(hsotg->dev, "XactErr\n");
1101             urb->status = -EPROTO;
1102             break;
1103         default:
1104             dev_err(hsotg->dev,
1105                 "%s: Unhandled descriptor error status (%d)\n",
1106                 __func__, halt_status);
1107             break;
1108         }
1109         return 1;
1110     }
1111 
1112     if (dma_desc->status & HOST_DMA_A) {
1113         dev_vdbg(hsotg->dev,
1114              "Active descriptor encountered on channel %d\n",
1115              chan->hc_num);
1116         return 0;
1117     }
1118 
1119     if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1120         if (qtd->control_phase == DWC2_CONTROL_DATA) {
1121             urb->actual_length += n_bytes - remain;
1122             if (remain || urb->actual_length >= urb->length) {
1123                 /*
1124                  * For Control Data stage do not set urb->status
1125                  * to 0, to prevent URB callback. Set it when
1126                  * Status phase is done. See below.
1127                  */
1128                 *xfer_done = 1;
1129             }
1130         } else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
1131             urb->status = 0;
1132             *xfer_done = 1;
1133         }
1134         /* No handling for SETUP stage */
1135     } else {
1136         /* BULK and INTR */
1137         urb->actual_length += n_bytes - remain;
1138         dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
1139              urb->actual_length);
1140         if (remain || urb->actual_length >= urb->length) {
1141             urb->status = 0;
1142             *xfer_done = 1;
1143         }
1144     }
1145 
1146     return 0;
1147 }
1148 
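/*
 * Worked example, continuing the IN case above: if a descriptor was
 * programmed for n_bytes = 1024 but the device returned only 1000 bytes,
 * the controller leaves remain = 24 in the descriptor's NBYTES field, so
 * urb->actual_length grows by 1024 - 24 = 1000 and the non-zero remain marks
 * the transfer as done even though actual_length is below urb->length.
 */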
1149 static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1150                       struct dwc2_host_chan *chan,
1151                       int chnum, struct dwc2_qtd *qtd,
1152                       int desc_num,
1153                       enum dwc2_halt_status halt_status,
1154                       int *xfer_done)
1155 {
1156     struct dwc2_qh *qh = chan->qh;
1157     struct dwc2_hcd_urb *urb = qtd->urb;
1158     struct dwc2_dma_desc *dma_desc;
1159     u32 n_bytes;
1160     int failed;
1161 
1162     dev_vdbg(hsotg->dev, "%s()\n", __func__);
1163 
1164     if (!urb)
1165         return -EINVAL;
1166 
1167     dma_sync_single_for_cpu(hsotg->dev,
1168                 qh->desc_list_dma + (desc_num *
1169                 sizeof(struct dwc2_dma_desc)),
1170                 sizeof(struct dwc2_dma_desc),
1171                 DMA_FROM_DEVICE);
1172 
1173     dma_desc = &qh->desc_list[desc_num];
1174     n_bytes = qh->n_bytes[desc_num];
1175     dev_vdbg(hsotg->dev,
1176          "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
1177          qtd, urb, desc_num, dma_desc, n_bytes);
1178     failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1179                              halt_status, n_bytes,
1180                              xfer_done);
1181     if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1182         dwc2_host_complete(hsotg, qtd, urb->status);
1183         dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1184         dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
1185              failed, *xfer_done);
1186         return failed;
1187     }
1188 
1189     if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1190         switch (qtd->control_phase) {
1191         case DWC2_CONTROL_SETUP:
1192             if (urb->length > 0)
1193                 qtd->control_phase = DWC2_CONTROL_DATA;
1194             else
1195                 qtd->control_phase = DWC2_CONTROL_STATUS;
1196             dev_vdbg(hsotg->dev,
1197                  "  Control setup transaction done\n");
1198             break;
1199         case DWC2_CONTROL_DATA:
1200             if (*xfer_done) {
1201                 qtd->control_phase = DWC2_CONTROL_STATUS;
1202                 dev_vdbg(hsotg->dev,
1203                      "  Control data transfer done\n");
1204             } else if (desc_num + 1 == qtd->n_desc) {
1205                 /*
1206                  * Last descriptor for Control data stage which
1207                  * is not completed yet
1208                  */
1209                 dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1210                               qtd);
1211             }
1212             break;
1213         default:
1214             break;
1215         }
1216     }
1217 
1218     return 0;
1219 }
1220 
1221 static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1222                          struct dwc2_host_chan *chan,
1223                          int chnum,
1224                          enum dwc2_halt_status halt_status)
1225 {
1226     struct list_head *qtd_item, *qtd_tmp;
1227     struct dwc2_qh *qh = chan->qh;
1228     struct dwc2_qtd *qtd = NULL;
1229     int xfer_done;
1230     int desc_num = 0;
1231 
1232     if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1233         list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
1234             qtd->in_process = 0;
1235         return;
1236     }
1237 
1238     list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1239         int i;
1240         int qtd_desc_count;
1241 
1242         qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1243         xfer_done = 0;
1244         qtd_desc_count = qtd->n_desc;
1245 
1246         for (i = 0; i < qtd_desc_count; i++) {
1247             if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1248                                desc_num, halt_status,
1249                                &xfer_done)) {
1250                 qtd = NULL;
1251                 goto stop_scan;
1252             }
1253 
1254             desc_num++;
1255         }
1256     }
1257 
1258 stop_scan:
1259     if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1260         /*
1261          * Resetting the data toggle for bulk and interrupt endpoints
1262          * in case of stall. See handle_hc_stall_intr().
1263          */
1264         if (halt_status == DWC2_HC_XFER_STALL)
1265             qh->data_toggle = DWC2_HC_PID_DATA0;
1266         else
1267             dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
1268     }
1269 
1270     if (halt_status == DWC2_HC_XFER_COMPLETE) {
1271         if (chan->hcint & HCINTMSK_NYET) {
1272             /*
1273              * Got a NYET on the last transaction of the transfer.
1274              * It means that the endpoint should be in the PING
1275              * state at the beginning of the next transfer.
1276              */
1277             qh->ping_state = 1;
1278         }
1279     }
1280 }
1281 
1282 /**
1283  * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
1284  * status and calls completion routine for the URB if it's done. Called from
1285  * interrupt handlers.
1286  *
1287  * @hsotg:       The HCD state structure for the DWC OTG controller
1288  * @chan:        Host channel the transfer is completed on
1289  * @chnum:       Index of Host channel registers
1290  * @halt_status: Reason the channel is being halted or just XferComplete
1291  *               for isochronous transfers
1292  *
1293  * Releases the channel to be used by other transfers.
1294  * In case of an Isochronous endpoint the channel is not halted until the end
1295  * of the session, i.e. until the QTD list is empty.
1296  * If a periodic channel is released, the FrameList is updated accordingly.
1297  * Calls transaction selection routines to activate pending transfers.
1298  */
1299 void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
1300                  struct dwc2_host_chan *chan, int chnum,
1301                  enum dwc2_halt_status halt_status)
1302 {
1303     struct dwc2_qh *qh = chan->qh;
1304     int continue_isoc_xfer = 0;
1305     enum dwc2_transaction_type tr_type;
1306 
1307     if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1308         dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
1309 
1310         /* Release the channel if halted or session completed */
1311         if (halt_status != DWC2_HC_XFER_COMPLETE ||
1312             list_empty(&qh->qtd_list)) {
1313             struct dwc2_qtd *qtd, *qtd_tmp;
1314 
1315             /*
1316              * Kill all remaining QTDs since the channel has been
1317              * halted.
1318              */
1319             list_for_each_entry_safe(qtd, qtd_tmp,
1320                          &qh->qtd_list,
1321                          qtd_list_entry) {
1322                 dwc2_host_complete(hsotg, qtd,
1323                            -ECONNRESET);
1324                 dwc2_hcd_qtd_unlink_and_free(hsotg,
1325                                  qtd, qh);
1326             }
1327 
1328             /* Halt the channel if session completed */
1329             if (halt_status == DWC2_HC_XFER_COMPLETE)
1330                 dwc2_hc_halt(hsotg, chan, halt_status);
1331             dwc2_release_channel_ddma(hsotg, qh);
1332             dwc2_hcd_qh_unlink(hsotg, qh);
1333         } else {
1334             /* Keep in assigned schedule to continue transfer */
1335             list_move_tail(&qh->qh_list_entry,
1336                        &hsotg->periodic_sched_assigned);
1337             /*
1338              * If channel has been halted during giveback of urb
1339              * then prevent any new scheduling.
1340              */
1341             if (!chan->halt_status)
1342                 continue_isoc_xfer = 1;
1343         }
1344         /*
1345          * Todo: Consider the case when period exceeds FrameList size.
1346          * Frame Rollover interrupt should be used.
1347          */
1348     } else {
1349         /*
1350          * Scan descriptor list to complete the URB(s), then release
1351          * the channel
1352          */
1353         dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
1354                          halt_status);
1355         dwc2_release_channel_ddma(hsotg, qh);
1356         dwc2_hcd_qh_unlink(hsotg, qh);
1357 
1358         if (!list_empty(&qh->qtd_list)) {
1359             /*
1360              * Add back to inactive non-periodic schedule on normal
1361              * completion
1362              */
1363             dwc2_hcd_qh_add(hsotg, qh);
1364         }
1365     }
1366 
1367     tr_type = dwc2_hcd_select_transactions(hsotg);
1368     if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
1369         if (continue_isoc_xfer) {
1370             if (tr_type == DWC2_TRANSACTION_NONE)
1371                 tr_type = DWC2_TRANSACTION_PERIODIC;
1372             else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
1373                 tr_type = DWC2_TRANSACTION_ALL;
1374         }
1375         dwc2_hcd_queue_transactions(hsotg, tr_type);
1376     }
1377 }