// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2001 by David Brownell
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * There are basically three types of memory:
 *  - data used only by the HCD ... kmalloc is fine
 *  - async and periodic schedules, shared by HC and HCD ... these
 *    need to use dma_pool or dma_alloc_coherent
 *  - driver buffers, read/written by HC ... single shot DMA mapped
 *
 * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
 * No memory seen by this driver is pageable.
 */
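
/*
 * Illustrative sketch (not part of the original file): the third category
 * above, a driver buffer that is single-shot DMA mapped around one
 * transfer.  The device pointer, buffer, and helper name are hypothetical;
 * <linux/dma-mapping.h> is assumed to be included by ehci-hcd.c.
 */
static int __maybe_unused example_single_shot_map(struct device *dev,
                  void *buf, size_t len)
{
    dma_addr_t dma;

    /* map the buffer so the HC can read it (host-to-device direction) */
    dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, dma))
        return -ENOMEM;

    /* ... program the HC with 'dma', wait for the transfer to finish ... */

    /* unmap once the HC has finished touching the buffer */
    dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
    return 0;
}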

/*-------------------------------------------------------------------------*/

/* Allocate the key transfer structures from the previously allocated pool */

static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
                  dma_addr_t dma)
{
    memset (qtd, 0, sizeof *qtd);
    qtd->qtd_dma = dma;
    qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
    qtd->hw_next = EHCI_LIST_END(ehci);
    qtd->hw_alt_next = EHCI_LIST_END(ehci);
    INIT_LIST_HEAD (&qtd->qtd_list);
}

static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
    struct ehci_qtd     *qtd;
    dma_addr_t      dma;

    qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma);
    if (qtd != NULL) {
        ehci_qtd_init(ehci, qtd, dma);
    }
    return qtd;
}

static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
    dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
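
/*
 * Illustrative sketch (not part of the original file): how a caller pairs
 * the helpers above.  The qtd pointer returned by ehci_qtd_alloc() is what
 * the CPU writes; qtd->qtd_dma is the address the host controller uses once
 * the qtd is linked.  The helper name is hypothetical.
 */
static int __maybe_unused example_qtd_roundtrip(struct ehci_hcd *ehci)
{
    struct ehci_qtd *qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);

    if (!qtd)
        return -ENOMEM;

    /* qtd comes back halted and terminated, so it is inert until activated */

    ehci_qtd_free(ehci, qtd);
    return 0;
}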


static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
    /* clean qtds first, and know this is not linked */
    if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
        ehci_dbg (ehci, "unused qh not empty!\n");
        BUG ();
    }
    if (qh->dummy)
        ehci_qtd_free (ehci, qh->dummy);
    dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
    kfree(qh);
}

static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
    struct ehci_qh      *qh;
    dma_addr_t      dma;

    qh = kzalloc(sizeof *qh, GFP_ATOMIC);
    if (!qh)
        goto done;
    qh->hw = (struct ehci_qh_hw *)
        dma_pool_zalloc(ehci->qh_pool, flags, &dma);
    if (!qh->hw)
        goto fail;
    qh->qh_dma = dma;
    // INIT_LIST_HEAD (&qh->qh_list);
    INIT_LIST_HEAD (&qh->qtd_list);
    INIT_LIST_HEAD(&qh->unlink_node);

    /* dummy td enables safe urb queuing */
    qh->dummy = ehci_qtd_alloc (ehci, flags);
    if (qh->dummy == NULL) {
        ehci_dbg (ehci, "no dummy td\n");
        goto fail1;
    }
done:
    return qh;
fail1:
    dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
fail:
    kfree(qh);
    return NULL;
}

/*-------------------------------------------------------------------------*/

/* The queue heads and transfer descriptors are managed from pools tied
 * to each of the "per device" structures.
 * This is the initialisation and cleanup code.
 */

static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
    if (ehci->async)
        qh_destroy(ehci, ehci->async);
    ehci->async = NULL;

    if (ehci->dummy)
        qh_destroy(ehci, ehci->dummy);
    ehci->dummy = NULL;

    /* DMA consistent memory and pools */
    dma_pool_destroy(ehci->qtd_pool);
    ehci->qtd_pool = NULL;
    dma_pool_destroy(ehci->qh_pool);
    ehci->qh_pool = NULL;
    dma_pool_destroy(ehci->itd_pool);
    ehci->itd_pool = NULL;
    dma_pool_destroy(ehci->sitd_pool);
    ehci->sitd_pool = NULL;

    if (ehci->periodic)
        dma_free_coherent(ehci_to_hcd(ehci)->self.sysdev,
            ehci->periodic_size * sizeof (u32),
            ehci->periodic, ehci->periodic_dma);
    ehci->periodic = NULL;

    /* shadow periodic table */
    kfree(ehci->pshadow);
    ehci->pshadow = NULL;
}
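
/*
 * Note: ehci_mem_cleanup() above also serves as the failure path of
 * ehci_mem_init() below, so it must cope with a partially initialised
 * ehci_hcd.  That works because dma_pool_destroy() and kfree() both accept
 * NULL, and every freed pointer is reset to NULL afterwards.
 */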

/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
    int i;

    /* QTDs for control/bulk/intr transfers */
    ehci->qtd_pool = dma_pool_create ("ehci_qtd",
            ehci_to_hcd(ehci)->self.sysdev,
            sizeof (struct ehci_qtd),
            32 /* byte alignment (for hw parts) */,
            4096 /* can't cross 4K */);
    if (!ehci->qtd_pool) {
        goto fail;
    }

    /* QHs for control/bulk/intr transfers */
    ehci->qh_pool = dma_pool_create ("ehci_qh",
            ehci_to_hcd(ehci)->self.sysdev,
            sizeof(struct ehci_qh_hw),
            32 /* byte alignment (for hw parts) */,
            4096 /* can't cross 4K */);
    if (!ehci->qh_pool) {
        goto fail;
    }
    ehci->async = ehci_qh_alloc (ehci, flags);
    if (!ehci->async) {
        goto fail;
    }

    /* ITD for high speed ISO transfers */
    ehci->itd_pool = dma_pool_create ("ehci_itd",
            ehci_to_hcd(ehci)->self.sysdev,
            sizeof (struct ehci_itd),
            32 /* byte alignment (for hw parts) */,
            4096 /* can't cross 4K */);
    if (!ehci->itd_pool) {
        goto fail;
    }

    /* SITD for full/low speed split ISO transfers */
    ehci->sitd_pool = dma_pool_create ("ehci_sitd",
            ehci_to_hcd(ehci)->self.sysdev,
            sizeof (struct ehci_sitd),
            32 /* byte alignment (for hw parts) */,
            4096 /* can't cross 4K */);
    if (!ehci->sitd_pool) {
        goto fail;
    }

    /* Hardware periodic table */
    ehci->periodic = (__le32 *)
        dma_alloc_coherent(ehci_to_hcd(ehci)->self.sysdev,
            ehci->periodic_size * sizeof(__le32),
            &ehci->periodic_dma, flags);
    if (ehci->periodic == NULL) {
        goto fail;
    }

    if (ehci->use_dummy_qh) {
        struct ehci_qh_hw   *hw;
        ehci->dummy = ehci_qh_alloc(ehci, flags);
        if (!ehci->dummy)
            goto fail;

        hw = ehci->dummy->hw;
        hw->hw_next = EHCI_LIST_END(ehci);
        hw->hw_qtd_next = EHCI_LIST_END(ehci);
        hw->hw_alt_next = EHCI_LIST_END(ehci);
        ehci->dummy->hw = hw;

        for (i = 0; i < ehci->periodic_size; i++)
            ehci->periodic[i] = cpu_to_hc32(ehci,
                    ehci->dummy->qh_dma);
    } else {
        for (i = 0; i < ehci->periodic_size; i++)
            ehci->periodic[i] = EHCI_LIST_END(ehci);
    }

    /* software shadow of hardware table */
    ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
    if (ehci->pshadow != NULL)
        return 0;

fail:
    ehci_dbg (ehci, "couldn't init memory\n");
    ehci_mem_cleanup (ehci);
    return -ENOMEM;
}
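
/*
 * Illustrative sketch (not part of the original file): the pairing of the
 * two routines above.  The real callers live elsewhere in ehci-hcd.c; the
 * wrapper below is hypothetical and only shows the contract, namely that
 * ehci_mem_init() either fully succeeds or cleans up after itself, and
 * ehci_mem_cleanup() releases everything that was allocated.
 */
static int __maybe_unused example_bring_up_schedules(struct ehci_hcd *ehci)
{
    int retval;

    /* ehci->periodic_size must already be set; allocations may sleep here */
    retval = ehci_mem_init(ehci, GFP_KERNEL);
    if (retval < 0)
        return retval;  /* ehci_mem_init() already cleaned up */

    /* ... run the host controller ... */

    /* on teardown, release schedules, pools, and the shadow table */
    ehci_mem_cleanup(ehci);
    return 0;
}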