// SPDX-License-Identifier: GPL-2.0
// Copyright 2014-2015 Freescale
// Copyright 2018 NXP

/*
 * Driver for NXP Layerscape Queue Direct Memory Access Controller
 *
 * Author:
 *  Wen He <wen.he_1@nxp.com>
 *  Jiaheng Fan <jiaheng.fan@nxp.com>
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>

#include "virt-dma.h"
#include "fsldma.h"
/* Register offset definitions */
#define FSL_QDMA_DMR            0x0
#define FSL_QDMA_DSR            0x4
#define FSL_QDMA_DEIER          0xe00
#define FSL_QDMA_DEDR           0xe04
#define FSL_QDMA_DECFDW0R       0xe10
#define FSL_QDMA_DECFDW1R       0xe14
#define FSL_QDMA_DECFDW2R       0xe18
#define FSL_QDMA_DECFDW3R       0xe1c
#define FSL_QDMA_DECFQIDR       0xe30
#define FSL_QDMA_DECBR          0xe34

#define FSL_QDMA_BCQMR(x)       (0xc0 + 0x100 * (x))
#define FSL_QDMA_BCQSR(x)       (0xc4 + 0x100 * (x))
#define FSL_QDMA_BCQEDPA_SADDR(x)   (0xc8 + 0x100 * (x))
#define FSL_QDMA_BCQDPA_SADDR(x)    (0xcc + 0x100 * (x))
#define FSL_QDMA_BCQEEPA_SADDR(x)   (0xd0 + 0x100 * (x))
#define FSL_QDMA_BCQEPA_SADDR(x)    (0xd4 + 0x100 * (x))
#define FSL_QDMA_BCQIER(x)      (0xe0 + 0x100 * (x))
#define FSL_QDMA_BCQIDR(x)      (0xe4 + 0x100 * (x))

#define FSL_QDMA_SQDPAR         0x80c
#define FSL_QDMA_SQEPAR         0x814
#define FSL_QDMA_BSQMR          0x800
#define FSL_QDMA_BSQSR          0x804
#define FSL_QDMA_BSQICR         0x828
#define FSL_QDMA_CQMR           0xa00
#define FSL_QDMA_CQDSCR1        0xa08
#define FSL_QDMA_CQDSCR2        0xa0c
#define FSL_QDMA_CQIER          0xa10
#define FSL_QDMA_CQEDR          0xa14
#define FSL_QDMA_SQCCMR         0xa20
/* Register bit and mask definitions */
#define FSL_QDMA_CQIDR_SQT      BIT(15)
#define QDMA_CCDF_FORMAT        BIT(29)
#define QDMA_CCDF_SER           BIT(30)
#define QDMA_SG_FIN             BIT(30)
#define QDMA_SG_LEN_MASK        GENMASK(29, 0)
#define QDMA_CCDF_MASK          GENMASK(28, 20)

#define FSL_QDMA_DEDR_CLEAR     GENMASK(31, 0)
#define FSL_QDMA_BCQIDR_CLEAR   GENMASK(31, 0)
#define FSL_QDMA_DEIER_CLEAR    GENMASK(31, 0)

#define FSL_QDMA_BCQIER_CQTIE   BIT(15)
#define FSL_QDMA_BCQIER_CQPEIE  BIT(23)
#define FSL_QDMA_BSQICR_ICEN    BIT(31)

#define FSL_QDMA_BSQICR_ICST(x)     ((x) << 16)
#define FSL_QDMA_CQIER_MEIE     BIT(31)
#define FSL_QDMA_CQIER_TEIE     BIT(0)
#define FSL_QDMA_SQCCMR_ENTER_WM    BIT(21)

#define FSL_QDMA_BCQMR_EN       BIT(31)
#define FSL_QDMA_BCQMR_EI       BIT(30)
#define FSL_QDMA_BCQMR_CD_THLD(x)   ((x) << 20)
#define FSL_QDMA_BCQMR_CQ_SIZE(x)   ((x) << 16)

#define FSL_QDMA_BCQSR_QF       BIT(16)
#define FSL_QDMA_BCQSR_XOFF     BIT(0)

#define FSL_QDMA_BSQMR_EN       BIT(31)
#define FSL_QDMA_BSQMR_DI       BIT(30)
#define FSL_QDMA_BSQMR_CQ_SIZE(x)   ((x) << 16)

#define FSL_QDMA_BSQSR_QE       BIT(17)

#define FSL_QDMA_DMR_DQD        BIT(30)
#define FSL_QDMA_DSR_DB         BIT(31)
/* Size definitions */
#define FSL_QDMA_QUEUE_MAX      8
#define FSL_QDMA_COMMAND_BUFFER_SIZE    64
#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
#define FSL_QDMA_QUEUE_NUM_MAX      8

/* Field definitions for CMD */
#define FSL_QDMA_CMD_RWTTYPE        0x4
#define FSL_QDMA_CMD_LWC            0x2
#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
#define FSL_QDMA_CMD_NS_OFFSET      27
#define FSL_QDMA_CMD_DQOS_OFFSET    24
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET    19
#define FSL_QDMA_CMD_LWC_OFFSET     16

/* Field definitions for descriptor status */
#define QDMA_CCDF_STATUS_RTE        BIT(5)
#define QDMA_CCDF_STATUS_WTE        BIT(4)
#define QDMA_CCDF_STATUS_CDE        BIT(2)
#define QDMA_CCDF_STATUS_SDE        BIT(1)
#define QDMA_CCDF_STATUS_DDE        BIT(0)
#define QDMA_CCDF_STATUS_MASK       (QDMA_CCDF_STATUS_RTE | \
                    QDMA_CCDF_STATUS_WTE | \
                    QDMA_CCDF_STATUS_CDE | \
                    QDMA_CCDF_STATUS_SDE | \
                    QDMA_CCDF_STATUS_DDE)

/* Field definitions for descriptor offset */
#define QDMA_CCDF_OFFSET        20
#define QDMA_SDDF_CMD(x)        (((u64)(x)) << 32)

/* Safe loop count and timeout definitions */
#define FSL_QDMA_HALT_COUNT     1500
#define FSL_QDMA_MAX_SIZE       16385
#define FSL_QDMA_COMP_TIMEOUT       1000
#define FSL_COMMAND_QUEUE_OVERFLOW  10

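/*
 * Each qDMA block owns a page of registers; block x starts at
 * block_offset * x bytes past block_base.
 */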
#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)          \
    (((fsl_qdma_engine)->block_offset) * (x))

/**
 * struct fsl_qdma_format - This is the struct describing the compound
 *              descriptor format used by the qDMA.
 * @status:         Command status and enqueue status notification.
 * @cfg:            Frame offset and frame format.
 * @addr_lo:            Lower 32 bits of the compound descriptor's
 *              40-bit memory address.
 * @addr_hi:            Upper 8 bits of the compound descriptor's
 *              40-bit memory address.
 * @__reserved1:        Reserved field.
 * @cfg8b_w1:           Compound descriptor command queue origin produced
 *              by the qDMA and dynamic debug field.
 * @data:           64-bit view of the same word; the low 40 bits
 *              hold the memory address of the DMA source and
 *              destination information.
 */
struct fsl_qdma_format {
    __le32 status;
    __le32 cfg;
    union {
        struct {
            __le32 addr_lo;
            u8 addr_hi;
            u8 __reserved1[2];
            u8 cfg8b_w1;
        } __packed;
        __le64 data;
    };
} __packed;

/*
 * Per-CPU record of the most recently processed status queue entry,
 * used to detect duplicated status notifications.
 */
struct fsl_pre_status {
    u64 addr;
    u8 queue;
};

static DEFINE_PER_CPU(struct fsl_pre_status, pre);

struct fsl_qdma_chan {
    struct virt_dma_chan        vchan;
    struct virt_dma_desc        vdesc;
    enum dma_status         status;
    struct fsl_qdma_engine      *qdma;
    struct fsl_qdma_queue       *queue;
};

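/* Per-queue state: command descriptor ring, buffer pools and lists. */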
struct fsl_qdma_queue {
    struct fsl_qdma_format  *virt_head;
    struct fsl_qdma_format  *virt_tail;
    struct list_head    comp_used;
    struct list_head    comp_free;
    struct dma_pool     *comp_pool;
    struct dma_pool     *desc_pool;
    spinlock_t      queue_lock;
    dma_addr_t      bus_addr;
    u32         n_cq;
    u32         id;
    struct fsl_qdma_format  *cq;
    void __iomem        *block_base;
};

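/*
 * One prepared transaction: the compound frame buffer plus the
 * source/destination command descriptor buffer.
 */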
struct fsl_qdma_comp {
    dma_addr_t              bus_addr;
    dma_addr_t              desc_bus_addr;
    struct fsl_qdma_format  *virt_addr;
    struct fsl_qdma_format  *desc_virt_addr;
    struct fsl_qdma_chan    *qchan;
    struct virt_dma_desc    vdesc;
    struct list_head    list;
};

struct fsl_qdma_engine {
    struct dma_device   dma_dev;
    void __iomem        *ctrl_base;
    void __iomem        *status_base;
    void __iomem        *block_base;
    u32         n_chans;
    u32         n_queues;
    struct mutex        fsl_qdma_mutex;
    int         error_irq;
    int         *queue_irq;
    u32         feature;
    struct fsl_qdma_queue   *queue;
    struct fsl_qdma_queue   **status;
    struct fsl_qdma_chan    *chans;
    int         block_number;
    int         block_offset;
    int         irq_base;
    int         desc_allocated;
};

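/*
 * The qDMA uses 40-bit bus addresses.  The helpers below pack and
 * unpack that address in the 64-bit descriptor word; the mask
 * (U64_MAX >> 24) keeps the low 40 bits.
 */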
static inline u64
qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
{
    return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
}

static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
    ccdf->addr_hi = upper_32_bits(addr);
    ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
}

static inline u8
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
    return ccdf->cfg8b_w1 & U8_MAX;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
    return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
    ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT |
                (offset << QDMA_CCDF_OFFSET));
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
    return (le32_to_cpu(ccdf->status) & QDMA_CCDF_STATUS_MASK);
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
    ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
}

static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
    csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
}

static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
    csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}

static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
    return FSL_DMA_IN(qdma, addr, 32);
}

static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
            void __iomem *addr)
{
    FSL_DMA_OUT(qdma, addr, val, 32);
}

static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
{
    return container_of(chan, struct fsl_qdma_chan, vchan.chan);
}

static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
    return container_of(vd, struct fsl_qdma_comp, vdesc);
}

static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
{
    struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
    struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
    struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
    struct fsl_qdma_comp *comp_temp, *_comp_temp;
    unsigned long flags;
    LIST_HEAD(head);

    spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
    vchan_get_all_descriptors(&fsl_chan->vchan, &head);
    spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

    vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

    if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
        return;

    list_for_each_entry_safe(comp_temp, _comp_temp,
                 &fsl_queue->comp_used, list) {
        dma_pool_free(fsl_queue->comp_pool,
                  comp_temp->virt_addr,
                  comp_temp->bus_addr);
        dma_pool_free(fsl_queue->desc_pool,
                  comp_temp->desc_virt_addr,
                  comp_temp->desc_bus_addr);
        list_del(&comp_temp->list);
        kfree(comp_temp);
    }

    list_for_each_entry_safe(comp_temp, _comp_temp,
                 &fsl_queue->comp_free, list) {
        dma_pool_free(fsl_queue->comp_pool,
                  comp_temp->virt_addr,
                  comp_temp->bus_addr);
        dma_pool_free(fsl_queue->desc_pool,
                  comp_temp->desc_virt_addr,
                  comp_temp->desc_bus_addr);
        list_del(&comp_temp->list);
        kfree(comp_temp);
    }

    dma_pool_destroy(fsl_queue->comp_pool);
    dma_pool_destroy(fsl_queue->desc_pool);

    fsl_qdma->desc_allocated--;
    fsl_queue->comp_pool = NULL;
    fsl_queue->desc_pool = NULL;
}

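/*
 * A memcpy transaction is built from a four-entry compound frame in
 * the comp_pool buffer (command descriptor, frame-list entry, source
 * S/G entry, destination S/G entry) plus a source/destination command
 * descriptor pair (sdf/ddf) in the desc_pool buffer.
 */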
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
                      dma_addr_t dst, dma_addr_t src, u32 len)
{
    u32 cmd;
    struct fsl_qdma_format *sdf, *ddf;
    struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

    ccdf = fsl_comp->virt_addr;
    csgf_desc = fsl_comp->virt_addr + 1;
    csgf_src = fsl_comp->virt_addr + 2;
    csgf_dest = fsl_comp->virt_addr + 3;
    sdf = fsl_comp->desc_virt_addr;
    ddf = fsl_comp->desc_virt_addr + 1;

    memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
    memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
    /* Head Command Descriptor (Frame Descriptor) */
    qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
    qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
    /* Status notification is enqueued to the status queue. */
    qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
    /* Compound Command Descriptor (Frame List Table) */
    qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
    /* The compound S/G descriptor length must be 32. */
    qdma_csgf_set_len(csgf_desc, 32);
    qdma_desc_addr_set64(csgf_src, src);
    qdma_csgf_set_len(csgf_src, len);
    qdma_desc_addr_set64(csgf_dest, dst);
    qdma_csgf_set_len(csgf_dest, len);
    /* This entry is the last entry. */
    qdma_csgf_set_f(csgf_dest, len);
    /* Descriptor Buffer */
    cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
              FSL_QDMA_CMD_RWTTYPE_OFFSET);
    sdf->data = QDMA_SDDF_CMD(cmd);

    cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
              FSL_QDMA_CMD_RWTTYPE_OFFSET);
    cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
    ddf->data = QDMA_SDDF_CMD(cmd);
}

/*
 * Pre-request a full set of command descriptors for enqueue.
 */
static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
{
    int i;
    struct fsl_qdma_comp *comp_temp, *_comp_temp;

    for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLOW; i++) {
        comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
        if (!comp_temp)
            goto err_alloc;
        comp_temp->virt_addr =
            dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
                       &comp_temp->bus_addr);
        if (!comp_temp->virt_addr)
            goto err_dma_alloc;

        comp_temp->desc_virt_addr =
            dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
                       &comp_temp->desc_bus_addr);
        if (!comp_temp->desc_virt_addr)
            goto err_desc_dma_alloc;

        list_add_tail(&comp_temp->list, &queue->comp_free);
    }

    return 0;

err_desc_dma_alloc:
    dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
              comp_temp->bus_addr);

err_dma_alloc:
    kfree(comp_temp);

err_alloc:
    list_for_each_entry_safe(comp_temp, _comp_temp,
                 &queue->comp_free, list) {
        if (comp_temp->virt_addr)
            dma_pool_free(queue->comp_pool,
                      comp_temp->virt_addr,
                      comp_temp->bus_addr);
        if (comp_temp->desc_virt_addr)
            dma_pool_free(queue->desc_pool,
                      comp_temp->desc_virt_addr,
                      comp_temp->desc_bus_addr);

        list_del(&comp_temp->list);
        kfree(comp_temp);
    }

    return -ENOMEM;
}

/*
 * Request a command descriptor for enqueue, polling the free list for
 * up to FSL_QDMA_COMP_TIMEOUT microseconds.
 */
static struct fsl_qdma_comp
*fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
    unsigned long flags;
    struct fsl_qdma_comp *comp_temp;
    int timeout = FSL_QDMA_COMP_TIMEOUT;
    struct fsl_qdma_queue *queue = fsl_chan->queue;

    while (timeout--) {
        spin_lock_irqsave(&queue->queue_lock, flags);
        if (!list_empty(&queue->comp_free)) {
            comp_temp = list_first_entry(&queue->comp_free,
                             struct fsl_qdma_comp,
                             list);
            list_del(&comp_temp->list);

            spin_unlock_irqrestore(&queue->queue_lock, flags);
            comp_temp->qchan = fsl_chan;
            return comp_temp;
        }
        spin_unlock_irqrestore(&queue->queue_lock, flags);
        udelay(1);
    }

    return NULL;
}

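/*
 * Allocate one command queue ring per (block, queue) pair; ring sizes
 * come from the "queue-sizes" device property.
 */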
static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
                struct fsl_qdma_engine *fsl_qdma)
{
    int ret, len, i, j;
    int queue_num, block_number;
    unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
    struct fsl_qdma_queue *queue_head, *queue_temp;

    queue_num = fsl_qdma->n_queues;
    block_number = fsl_qdma->block_number;

    if (queue_num > FSL_QDMA_QUEUE_MAX)
        queue_num = FSL_QDMA_QUEUE_MAX;
    len = sizeof(*queue_head) * queue_num * block_number;
    queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
    if (!queue_head)
        return NULL;

    ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
                         queue_size, queue_num);
    if (ret) {
        dev_err(&pdev->dev, "Can't get queue-sizes.\n");
        return NULL;
    }
    for (j = 0; j < block_number; j++) {
        for (i = 0; i < queue_num; i++) {
            if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
                queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                dev_err(&pdev->dev,
                    "Invalid queue-sizes value.\n");
                return NULL;
            }
            queue_temp = queue_head + i + (j * queue_num);

            queue_temp->cq =
            dma_alloc_coherent(&pdev->dev,
                       sizeof(struct fsl_qdma_format) *
                       queue_size[i],
                       &queue_temp->bus_addr,
                       GFP_KERNEL);
            if (!queue_temp->cq)
                return NULL;
            queue_temp->block_base = fsl_qdma->block_base +
                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
            queue_temp->n_cq = queue_size[i];
            queue_temp->id = i;
            queue_temp->virt_head = queue_temp->cq;
            queue_temp->virt_tail = queue_temp->cq;
            /*
             * List for queue command buffers
             */
            INIT_LIST_HEAD(&queue_temp->comp_used);
            spin_lock_init(&queue_temp->queue_lock);
        }
    }
    return queue_head;
}

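/*
 * Allocate the per-block status queue ring; its size comes from the
 * "status-sizes" device property.
 */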
static struct fsl_qdma_queue
*fsl_qdma_prep_status_queue(struct platform_device *pdev)
{
    int ret;
    unsigned int status_size;
    struct fsl_qdma_queue *status_head;
    struct device_node *np = pdev->dev.of_node;

    ret = of_property_read_u32(np, "status-sizes", &status_size);
    if (ret) {
        dev_err(&pdev->dev, "Can't get status-sizes.\n");
        return NULL;
    }
    if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
        status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
        dev_err(&pdev->dev, "Invalid status-sizes value.\n");
        return NULL;
    }
    status_head = devm_kzalloc(&pdev->dev,
                   sizeof(*status_head), GFP_KERNEL);
    if (!status_head)
        return NULL;

    /*
     * Buffer for queue command
     */
    status_head->cq = dma_alloc_coherent(&pdev->dev,
                         sizeof(struct fsl_qdma_format) *
                         status_size,
                         &status_head->bus_addr,
                         GFP_KERNEL);
    if (!status_head->cq) {
        devm_kfree(&pdev->dev, status_head);
        return NULL;
    }
    status_head->n_cq = status_size;
    status_head->virt_head = status_head->cq;
    status_head->virt_tail = status_head->cq;
    status_head->comp_pool = NULL;

    return status_head;
}

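/*
 * Halt sequence: set DMR[DQD] to stop dequeueing, disable every
 * command queue, then poll DSR[DB] until the engine reports idle.
 */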
static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
    u32 reg;
    int i, j, count = FSL_QDMA_HALT_COUNT;
    void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

    /* Disable the command queue and wait for idle state. */
    reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
    reg |= FSL_QDMA_DMR_DQD;
    qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
    for (j = 0; j < fsl_qdma->block_number; j++) {
        block = fsl_qdma->block_base +
            FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
        for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
            qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
    }
    while (1) {
        reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
        if (!(reg & FSL_QDMA_DSR_DB))
            break;
        if (count-- < 0)
            return -EBUSY;
        udelay(100);
    }

    for (j = 0; j < fsl_qdma->block_number; j++) {
        block = fsl_qdma->block_base +
            FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

        /* Disable the status queue. */
        qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);

        /*
         * Clear the command queue interrupt detect register for
         * all queues.
         */
        qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                block + FSL_QDMA_BCQIDR(0));
    }

    return 0;
}

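/*
 * Drain the block status queue.  Each status entry carries the 40-bit
 * address and queue number of a completed command descriptor; the
 * per-CPU 'pre' record of the previously seen entry is used to detect
 * and skip duplicated status notifications.
 */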
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
                 void __iomem *block,
                 int id)
{
    bool duplicate;
    u32 reg, i, count;
    u8 completion_status;
    struct fsl_qdma_queue *temp_queue;
    struct fsl_qdma_format *status_addr;
    struct fsl_qdma_comp *fsl_comp = NULL;
    struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
    struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];

    count = FSL_QDMA_MAX_SIZE;

    while (count--) {
        duplicate = 0;
        reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
        if (reg & FSL_QDMA_BSQSR_QE)
            return 0;

        status_addr = fsl_status->virt_head;

        if (qdma_ccdf_get_queue(status_addr) ==
           __this_cpu_read(pre.queue) &&
            qdma_ccdf_addr_get64(status_addr) ==
            __this_cpu_read(pre.addr))
            duplicate = 1;
        i = qdma_ccdf_get_queue(status_addr) +
            id * fsl_qdma->n_queues;
        __this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
        __this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
        temp_queue = fsl_queue + i;

        spin_lock(&temp_queue->queue_lock);
        if (list_empty(&temp_queue->comp_used)) {
            if (!duplicate) {
                spin_unlock(&temp_queue->queue_lock);
                return -EAGAIN;
            }
        } else {
            fsl_comp = list_first_entry(&temp_queue->comp_used,
                            struct fsl_qdma_comp, list);
            if (fsl_comp->bus_addr + 16 !=
                __this_cpu_read(pre.addr)) {
                if (!duplicate) {
                    spin_unlock(&temp_queue->queue_lock);
                    return -EAGAIN;
                }
            }
        }

        if (duplicate) {
            reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
            reg |= FSL_QDMA_BSQMR_DI;
            qdma_desc_addr_set64(status_addr, 0x0);
            fsl_status->virt_head++;
            if (fsl_status->virt_head == fsl_status->cq
                           + fsl_status->n_cq)
                fsl_status->virt_head = fsl_status->cq;
            qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
            spin_unlock(&temp_queue->queue_lock);
            continue;
        }
        list_del(&fsl_comp->list);

        completion_status = qdma_ccdf_get_status(status_addr);

        reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
        reg |= FSL_QDMA_BSQMR_DI;
        qdma_desc_addr_set64(status_addr, 0x0);
        fsl_status->virt_head++;
        if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
            fsl_status->virt_head = fsl_status->cq;
        qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
        spin_unlock(&temp_queue->queue_lock);

        /*
         * The completion_status is evaluated here
         * (outside of the spinlock).
         */
        if (completion_status) {
            /* A completion error occurred! */
            if (completion_status & QDMA_CCDF_STATUS_WTE) {
                /* Write transaction error */
                fsl_comp->vdesc.tx_result.result =
                    DMA_TRANS_WRITE_FAILED;
            } else if (completion_status & QDMA_CCDF_STATUS_RTE) {
                /* Read transaction error */
                fsl_comp->vdesc.tx_result.result =
                    DMA_TRANS_READ_FAILED;
            } else {
                /*
                 * Command/source/destination
                 * descriptor error
                 */
                fsl_comp->vdesc.tx_result.result =
                    DMA_TRANS_ABORTED;
                dev_err(fsl_qdma->dma_dev.dev,
                    "DMA status descriptor error %x\n",
                    completion_status);
            }
        }

        spin_lock(&fsl_comp->qchan->vchan.lock);
        vchan_cookie_complete(&fsl_comp->vdesc);
        fsl_comp->qchan->status = DMA_COMPLETE;
        spin_unlock(&fsl_comp->qchan->vchan.lock);
    }

    return 0;
}

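/*
 * Error interrupt: report the captured error frame descriptor words
 * and clear the error detect register.
 */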
static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
{
    unsigned int intr;
    struct fsl_qdma_engine *fsl_qdma = dev_id;
    void __iomem *status = fsl_qdma->status_base;
    unsigned int decfdw0r;
    unsigned int decfdw1r;
    unsigned int decfdw2r;
    unsigned int decfdw3r;

    intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);

    if (intr) {
        decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
        decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
        decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
        decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
        dev_err(fsl_qdma->dma_dev.dev,
            "DMA transaction error! (%x: %x-%x-%x-%x)\n",
            intr, decfdw0r, decfdw1r, decfdw2r, decfdw3r);
    }

    qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
    return IRQ_HANDLED;
}

static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
{
    int id;
    unsigned int intr, reg;
    struct fsl_qdma_engine *fsl_qdma = dev_id;
    void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

    id = irq - fsl_qdma->irq_base;
    if (id < 0 || id >= fsl_qdma->block_number) {
        dev_err(fsl_qdma->dma_dev.dev,
            "irq %d is wrong, irq_base is %d\n",
            irq, fsl_qdma->irq_base);
        return IRQ_NONE;
    }

    block = fsl_qdma->block_base +
        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

    intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));

    if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
        intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);

    if (intr != 0) {
        reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
        reg |= FSL_QDMA_DMR_DQD;
        qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
        qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
        dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
    }

    /* Clear all detected events and interrupts. */
    qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
            block + FSL_QDMA_BCQIDR(0));

    return IRQ_HANDLED;
}

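/*
 * Request the controller error IRQ plus one queue IRQ per block, and
 * spread the queue IRQ affinities across the online CPUs.
 */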
static int
fsl_qdma_irq_init(struct platform_device *pdev,
          struct fsl_qdma_engine *fsl_qdma)
{
    int i;
    int cpu;
    int ret;
    char irq_name[20];

    fsl_qdma->error_irq =
        platform_get_irq_byname(pdev, "qdma-error");
    if (fsl_qdma->error_irq < 0)
        return fsl_qdma->error_irq;

    ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
                   fsl_qdma_error_handler, 0,
                   "qDMA error", fsl_qdma);
    if (ret) {
        dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
        return ret;
    }

    for (i = 0; i < fsl_qdma->block_number; i++) {
        sprintf(irq_name, "qdma-queue%d", i);
        fsl_qdma->queue_irq[i] =
                platform_get_irq_byname(pdev, irq_name);

        if (fsl_qdma->queue_irq[i] < 0)
            return fsl_qdma->queue_irq[i];

        ret = devm_request_irq(&pdev->dev,
                       fsl_qdma->queue_irq[i],
                       fsl_qdma_queue_handler,
                       0,
                       "qDMA queue",
                       fsl_qdma);
        if (ret) {
            dev_err(&pdev->dev,
                "Can't register qDMA queue IRQ.\n");
            return ret;
        }

        cpu = i % num_online_cpus();
        ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
                        get_cpu_mask(cpu));
        if (ret) {
            dev_err(&pdev->dev,
                "Can't set cpu %d affinity to IRQ %d.\n",
                cpu,
                fsl_qdma->queue_irq[i]);
            return ret;
        }
    }

    return 0;
}

static void fsl_qdma_irq_exit(struct platform_device *pdev,
                  struct fsl_qdma_engine *fsl_qdma)
{
    int i;

    devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
    for (i = 0; i < fsl_qdma->block_number; i++)
        devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
}

static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
    u32 reg;
    int i, j, ret;
    struct fsl_qdma_queue *temp;
    void __iomem *status = fsl_qdma->status_base;
    void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
    struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

    /* Try to halt the qDMA engine first. */
    ret = fsl_qdma_halt(fsl_qdma);
    if (ret) {
        dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
        return ret;
    }

    for (i = 0; i < fsl_qdma->block_number; i++) {
        /*
         * Clear the command queue interrupt detect register for
         * all queues.
         */
        block = fsl_qdma->block_base +
            FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
        qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                block + FSL_QDMA_BCQIDR(0));
    }

    for (j = 0; j < fsl_qdma->block_number; j++) {
        block = fsl_qdma->block_base +
            FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
        for (i = 0; i < fsl_qdma->n_queues; i++) {
            temp = fsl_queue + i + (j * fsl_qdma->n_queues);
            /*
             * Initialize the Command Queue registers to point
             * to the first command descriptor in memory:
             * Dequeue Pointer Address Register and
             * Enqueue Pointer Address Register.
             */
            qdma_writel(fsl_qdma, temp->bus_addr,
                    block + FSL_QDMA_BCQDPA_SADDR(i));
            qdma_writel(fsl_qdma, temp->bus_addr,
                    block + FSL_QDMA_BCQEPA_SADDR(i));

            /* Initialize the queue mode. */
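            /*
             * n_cq is expected to be a power-of-two ring size
             * between 64 and 16384; both the threshold and size
             * fields take an ilog2-based encoding of it.
             */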
            reg = FSL_QDMA_BCQMR_EN;
            reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
            reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
            qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
        }

        /*
         * Workaround for erratum ERR010812:
         * XOFF must be enabled to avoid enqueue rejections,
         * so set SQCCMR ENTER_WM to 0x20.
         */
        qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
                block + FSL_QDMA_SQCCMR);

        /*
         * Initialize the status queue registers to point to the
         * first command descriptor in memory:
         * Dequeue Pointer Address Register and
         * Enqueue Pointer Address Register.
         */
        qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
                block + FSL_QDMA_SQEPAR);
        qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
                block + FSL_QDMA_SQDPAR);
        /* Initialize the status queue interrupt. */
        qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
                block + FSL_QDMA_BCQIER(0));
        qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
                   FSL_QDMA_BSQICR_ICST(5) | 0x8000,
                   block + FSL_QDMA_BSQICR);
        qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
                   FSL_QDMA_CQIER_TEIE,
                   block + FSL_QDMA_CQIER);

        /* Initialize the status queue mode. */
        reg = FSL_QDMA_BSQMR_EN;
        reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status[j]->n_cq) - 6);

        qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
        reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
    }

    /* Initialize the controller interrupt registers. */
    qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
    qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);

    reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
    reg &= ~FSL_QDMA_DMR_DQD;
    qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

    return 0;
}

static struct dma_async_tx_descriptor *
fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
             dma_addr_t src, size_t len, unsigned long flags)
{
    struct fsl_qdma_comp *fsl_comp;
    struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

    fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);

    if (!fsl_comp)
        return NULL;

    fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);

    return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
}

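/*
 * Copy the next pending compound descriptor into the command queue
 * ring, wrap virt_head at the end of the ring, then set BCQMR[EI] to
 * tell the hardware a new entry has been enqueued.
 */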
static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
    u32 reg;
    struct virt_dma_desc *vdesc;
    struct fsl_qdma_comp *fsl_comp;
    struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
    void __iomem *block = fsl_queue->block_base;

    reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
    if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
        return;
    vdesc = vchan_next_desc(&fsl_chan->vchan);
    if (!vdesc)
        return;
    list_del(&vdesc->node);
    fsl_comp = to_fsl_qdma_comp(vdesc);

    memcpy(fsl_queue->virt_head++,
           fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
    if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
        fsl_queue->virt_head = fsl_queue->cq;

    list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
    barrier();
    reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
    reg |= FSL_QDMA_BCQMR_EI;
    qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
    fsl_chan->status = DMA_IN_PROGRESS;
}

static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
{
    unsigned long flags;
    struct fsl_qdma_comp *fsl_comp;
    struct fsl_qdma_queue *fsl_queue;

    fsl_comp = to_fsl_qdma_comp(vdesc);
    fsl_queue = fsl_comp->qchan->queue;

    spin_lock_irqsave(&fsl_queue->queue_lock, flags);
    list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
    spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_issue_pending(struct dma_chan *chan)
{
    unsigned long flags;
    struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
    struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

    spin_lock_irqsave(&fsl_queue->queue_lock, flags);
    spin_lock(&fsl_chan->vchan.lock);
    if (vchan_issue_pending(&fsl_chan->vchan))
        fsl_qdma_enqueue_desc(fsl_chan);
    spin_unlock(&fsl_chan->vchan.lock);
    spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_synchronize(struct dma_chan *chan)
{
    struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

    vchan_synchronize(&fsl_chan->vchan);
}

static int fsl_qdma_terminate_all(struct dma_chan *chan)
{
    LIST_HEAD(head);
    unsigned long flags;
    struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

    spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
    vchan_get_all_descriptors(&fsl_chan->vchan, &head);
    spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
    vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
    return 0;
}

static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
{
    int ret;
    struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
    struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
    struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

    if (fsl_queue->comp_pool && fsl_queue->desc_pool)
        return fsl_qdma->desc_allocated;

    INIT_LIST_HEAD(&fsl_queue->comp_free);

    /*
     * The dma pool for queue command buffers
     */
    fsl_queue->comp_pool =
    dma_pool_create("comp_pool",
            chan->device->dev,
            FSL_QDMA_COMMAND_BUFFER_SIZE,
            64, 0);
    if (!fsl_queue->comp_pool)
        return -ENOMEM;

    /*
     * The dma pool for descriptor (SD/DD) buffers
     */
    fsl_queue->desc_pool =
    dma_pool_create("desc_pool",
            chan->device->dev,
            FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
            32, 0);
    if (!fsl_queue->desc_pool)
        goto err_desc_pool;

    ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
    if (ret) {
        dev_err(chan->device->dev,
            "failed to alloc dma buffer for S/G descriptor\n");
        goto err_mem;
    }

    fsl_qdma->desc_allocated++;
    return fsl_qdma->desc_allocated;

err_mem:
    dma_pool_destroy(fsl_queue->desc_pool);
err_desc_pool:
    dma_pool_destroy(fsl_queue->comp_pool);
    return -ENOMEM;
}

static int fsl_qdma_probe(struct platform_device *pdev)
{
    int ret, i;
    int blk_num, blk_off;
    u32 len, chans, queues;
    struct resource *res;
    struct fsl_qdma_chan *fsl_chan;
    struct fsl_qdma_engine *fsl_qdma;
    struct device_node *np = pdev->dev.of_node;

    ret = of_property_read_u32(np, "dma-channels", &chans);
    if (ret) {
        dev_err(&pdev->dev, "Can't get dma-channels.\n");
        return ret;
    }

    ret = of_property_read_u32(np, "block-offset", &blk_off);
    if (ret) {
        dev_err(&pdev->dev, "Can't get block-offset.\n");
        return ret;
    }

    ret = of_property_read_u32(np, "block-number", &blk_num);
    if (ret) {
        dev_err(&pdev->dev, "Can't get block-number.\n");
        return ret;
    }

    blk_num = min_t(int, blk_num, num_online_cpus());

    len = sizeof(*fsl_qdma);
    fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
    if (!fsl_qdma)
        return -ENOMEM;

    len = sizeof(*fsl_chan) * chans;
    fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
    if (!fsl_qdma->chans)
        return -ENOMEM;

    len = sizeof(struct fsl_qdma_queue *) * blk_num;
    fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
    if (!fsl_qdma->status)
        return -ENOMEM;

    len = sizeof(int) * blk_num;
    fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
    if (!fsl_qdma->queue_irq)
        return -ENOMEM;

    ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
    if (ret) {
        dev_err(&pdev->dev, "Can't get fsl,dma-queues.\n");
        return ret;
    }

    fsl_qdma->desc_allocated = 0;
    fsl_qdma->n_chans = chans;
    fsl_qdma->n_queues = queues;
    fsl_qdma->block_number = blk_num;
    fsl_qdma->block_offset = blk_off;

    mutex_init(&fsl_qdma->fsl_qdma_mutex);

    for (i = 0; i < fsl_qdma->block_number; i++) {
        fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
        if (!fsl_qdma->status[i])
            return -ENOMEM;
    }
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(fsl_qdma->ctrl_base))
        return PTR_ERR(fsl_qdma->ctrl_base);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(fsl_qdma->status_base))
        return PTR_ERR(fsl_qdma->status_base);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
    fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(fsl_qdma->block_base))
        return PTR_ERR(fsl_qdma->block_base);
    fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
    if (!fsl_qdma->queue)
        return -ENOMEM;

    ret = fsl_qdma_irq_init(pdev, fsl_qdma);
    if (ret)
        return ret;

    fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
    if (fsl_qdma->irq_base < 0)
        return fsl_qdma->irq_base;

    fsl_qdma->feature = of_property_read_bool(np, "big-endian");
    INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

    for (i = 0; i < fsl_qdma->n_chans; i++) {
        struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

        fsl_chan->qdma = fsl_qdma;
        fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
                            fsl_qdma->block_number);
        fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
        vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
    }

    dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

    fsl_qdma->dma_dev.dev = &pdev->dev;
    fsl_qdma->dma_dev.device_free_chan_resources =
        fsl_qdma_free_chan_resources;
    fsl_qdma->dma_dev.device_alloc_chan_resources =
        fsl_qdma_alloc_chan_resources;
    fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
    fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
    fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
    fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
    fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;

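    /* The qDMA master interface issues 40-bit bus addresses. */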
    ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
    if (ret) {
        dev_err(&pdev->dev, "dma_set_mask failure.\n");
        return ret;
    }

    platform_set_drvdata(pdev, fsl_qdma);

    ret = dma_async_device_register(&fsl_qdma->dma_dev);
    if (ret) {
        dev_err(&pdev->dev,
            "Can't register NXP Layerscape qDMA engine.\n");
        return ret;
    }

    ret = fsl_qdma_reg_init(fsl_qdma);
    if (ret) {
        dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
        return ret;
    }

    return 0;
}

static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
{
    struct fsl_qdma_chan *chan, *_chan;

    list_for_each_entry_safe(chan, _chan,
                 &dmadev->channels, vchan.chan.device_node) {
        list_del(&chan->vchan.chan.device_node);
        tasklet_kill(&chan->vchan.task);
    }
}

static int fsl_qdma_remove(struct platform_device *pdev)
{
    int i;
    struct fsl_qdma_queue *status;
    struct device_node *np = pdev->dev.of_node;
    struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

    fsl_qdma_irq_exit(pdev, fsl_qdma);
    fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
    of_dma_controller_free(np);
    dma_async_device_unregister(&fsl_qdma->dma_dev);

    for (i = 0; i < fsl_qdma->block_number; i++) {
        status = fsl_qdma->status[i];
        dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
                status->n_cq, status->cq, status->bus_addr);
    }
    return 0;
}

static const struct of_device_id fsl_qdma_dt_ids[] = {
    { .compatible = "fsl,ls1021a-qdma", },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);

static struct platform_driver fsl_qdma_driver = {
    .driver     = {
        .name   = "fsl-qdma",
        .of_match_table = fsl_qdma_dt_ids,
    },
    .probe      = fsl_qdma_probe,
    .remove     = fsl_qdma_remove,
};

module_platform_driver(fsl_qdma_driver);

MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");