0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
0004  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
0005  */
0006 
0007 #include <linux/kernel.h>
0008 #include <linux/delay.h>
0009 #include <linux/dmaengine.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/dmapool.h>
0012 #include <linux/err.h>
0013 #include <linux/init.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/list.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/slab.h>
0018 #include <linux/spinlock.h>
0019 #include <linux/sys_soc.h>
0020 #include <linux/of.h>
0021 #include <linux/of_dma.h>
0022 #include <linux/of_device.h>
0023 #include <linux/of_irq.h>
0024 #include <linux/workqueue.h>
0025 #include <linux/completion.h>
0026 #include <linux/soc/ti/k3-ringacc.h>
0027 #include <linux/soc/ti/ti_sci_protocol.h>
0028 #include <linux/soc/ti/ti_sci_inta_msi.h>
0029 #include <linux/dma/k3-event-router.h>
0030 #include <linux/dma/ti-cppi5.h>
0031 
0032 #include "../virt-dma.h"
0033 #include "k3-udma.h"
0034 #include "k3-psil-priv.h"
0035 
0036 struct udma_static_tr {
0037     u8 elsize; /* RPSTR0 */
0038     u16 elcnt; /* RPSTR0 */
0039     u16 bstcnt; /* RPSTR1 */
0040 };
0041 
0042 #define K3_UDMA_MAX_RFLOWS      1024
0043 #define K3_UDMA_DEFAULT_RING_SIZE   16
0044 
0045 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
0046 #define UDMA_RFLOW_SRCTAG_NONE      0
0047 #define UDMA_RFLOW_SRCTAG_CFG_TAG   1
0048 #define UDMA_RFLOW_SRCTAG_FLOW_ID   2
0049 #define UDMA_RFLOW_SRCTAG_SRC_TAG   4
0050 
0051 #define UDMA_RFLOW_DSTTAG_NONE      0
0052 #define UDMA_RFLOW_DSTTAG_CFG_TAG   1
0053 #define UDMA_RFLOW_DSTTAG_FLOW_ID   2
0054 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO    4
0055 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI    5
0056 
0057 struct udma_chan;
0058 
0059 enum k3_dma_type {
0060     DMA_TYPE_UDMA = 0,
0061     DMA_TYPE_BCDMA,
0062     DMA_TYPE_PKTDMA,
0063 };
0064 
0065 enum udma_mmr {
0066     MMR_GCFG = 0,
0067     MMR_BCHANRT,
0068     MMR_RCHANRT,
0069     MMR_TCHANRT,
0070     MMR_LAST,
0071 };
0072 
0073 static const char * const mmr_names[] = {
0074     [MMR_GCFG] = "gcfg",
0075     [MMR_BCHANRT] = "bchanrt",
0076     [MMR_RCHANRT] = "rchanrt",
0077     [MMR_TCHANRT] = "tchanrt",
0078 };
0079 
0080 struct udma_tchan {
0081     void __iomem *reg_rt;
0082 
0083     int id;
0084     struct k3_ring *t_ring; /* Transmit ring */
0085     struct k3_ring *tc_ring; /* Transmit Completion ring */
0086     int tflow_id; /* applicable only for PKTDMA */
0087 
0088 };
0089 
0090 #define udma_bchan udma_tchan
0091 
0092 struct udma_rflow {
0093     int id;
0094     struct k3_ring *fd_ring; /* Free Descriptor ring */
0095     struct k3_ring *r_ring; /* Receive ring */
0096 };
0097 
0098 struct udma_rchan {
0099     void __iomem *reg_rt;
0100 
0101     int id;
0102 };
0103 
0104 struct udma_oes_offsets {
0105     /* K3 UDMA Output Event Offset */
0106     u32 udma_rchan;
0107 
0108     /* BCDMA Output Event Offsets */
0109     u32 bcdma_bchan_data;
0110     u32 bcdma_bchan_ring;
0111     u32 bcdma_tchan_data;
0112     u32 bcdma_tchan_ring;
0113     u32 bcdma_rchan_data;
0114     u32 bcdma_rchan_ring;
0115 
0116     /* PKTDMA Output Event Offsets */
0117     u32 pktdma_tchan_flow;
0118     u32 pktdma_rchan_flow;
0119 };
0120 
0121 #define UDMA_FLAG_PDMA_ACC32        BIT(0)
0122 #define UDMA_FLAG_PDMA_BURST        BIT(1)
0123 #define UDMA_FLAG_TDTYPE        BIT(2)
0124 #define UDMA_FLAG_BURST_SIZE        BIT(3)
0125 #define UDMA_FLAGS_J7_CLASS     (UDMA_FLAG_PDMA_ACC32 | \
0126                      UDMA_FLAG_PDMA_BURST | \
0127                      UDMA_FLAG_TDTYPE | \
0128                      UDMA_FLAG_BURST_SIZE)
0129 
0130 struct udma_match_data {
0131     enum k3_dma_type type;
0132     u32 psil_base;
0133     bool enable_memcpy_support;
0134     u32 flags;
0135     u32 statictr_z_mask;
0136     u8 burst_size[3];
0137 };
0138 
0139 struct udma_soc_data {
0140     struct udma_oes_offsets oes;
0141     u32 bcdma_trigger_event_offset;
0142 };
0143 
0144 struct udma_hwdesc {
0145     size_t cppi5_desc_size;
0146     void *cppi5_desc_vaddr;
0147     dma_addr_t cppi5_desc_paddr;
0148 
0149     /* TR descriptor internal pointers */
0150     void *tr_req_base;
0151     struct cppi5_tr_resp_t *tr_resp_base;
0152 };
0153 
0154 struct udma_rx_flush {
0155     struct udma_hwdesc hwdescs[2];
0156 
0157     size_t buffer_size;
0158     void *buffer_vaddr;
0159     dma_addr_t buffer_paddr;
0160 };
0161 
0162 struct udma_tpl {
0163     u8 levels;
0164     u32 start_idx[3];
0165 };
0166 
0167 struct udma_dev {
0168     struct dma_device ddev;
0169     struct device *dev;
0170     void __iomem *mmrs[MMR_LAST];
0171     const struct udma_match_data *match_data;
0172     const struct udma_soc_data *soc_data;
0173 
0174     struct udma_tpl bchan_tpl;
0175     struct udma_tpl tchan_tpl;
0176     struct udma_tpl rchan_tpl;
0177 
0178     size_t desc_align; /* alignment to use for descriptors */
0179 
0180     struct udma_tisci_rm tisci_rm;
0181 
0182     struct k3_ringacc *ringacc;
0183 
0184     struct work_struct purge_work;
0185     struct list_head desc_to_purge;
0186     spinlock_t lock;
0187 
0188     struct udma_rx_flush rx_flush;
0189 
0190     int bchan_cnt;
0191     int tchan_cnt;
0192     int echan_cnt;
0193     int rchan_cnt;
0194     int rflow_cnt;
0195     int tflow_cnt;
0196     unsigned long *bchan_map;
0197     unsigned long *tchan_map;
0198     unsigned long *rchan_map;
0199     unsigned long *rflow_gp_map;
0200     unsigned long *rflow_gp_map_allocated;
0201     unsigned long *rflow_in_use;
0202     unsigned long *tflow_map;
0203 
0204     struct udma_bchan *bchans;
0205     struct udma_tchan *tchans;
0206     struct udma_rchan *rchans;
0207     struct udma_rflow *rflows;
0208 
0209     struct udma_chan *channels;
0210     u32 psil_base;
0211     u32 atype;
0212     u32 asel;
0213 };
0214 
0215 struct udma_desc {
0216     struct virt_dma_desc vd;
0217 
0218     bool terminated;
0219 
0220     enum dma_transfer_direction dir;
0221 
0222     struct udma_static_tr static_tr;
0223     u32 residue;
0224 
0225     unsigned int sglen;
0226     unsigned int desc_idx; /* Only used for cyclic in packet mode */
0227     unsigned int tr_idx;
0228 
0229     u32 metadata_size;
0230     void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
0231 
0232     unsigned int hwdesc_count;
0233     struct udma_hwdesc hwdesc[];
0234 };
0235 
0236 enum udma_chan_state {
0237     UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
0238     UDMA_CHAN_IS_ACTIVE, /* Normal operation */
0239     UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
0240 };
0241 
0242 struct udma_tx_drain {
0243     struct delayed_work work;
0244     ktime_t tstamp;
0245     u32 residue;
0246 };
0247 
0248 struct udma_chan_config {
0249     bool pkt_mode; /* TR or packet */
0250     bool needs_epib; /* whether EPIB is needed for the communication */
0251     u32 psd_size; /* size of Protocol Specific Data */
0252     u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
0253     u32 hdesc_size; /* Size of a packet descriptor in packet mode */
0254     bool notdpkt; /* Suppress sending TDC packet */
0255     int remote_thread_id;
0256     u32 atype;
0257     u32 asel;
0258     u32 src_thread;
0259     u32 dst_thread;
0260     enum psil_endpoint_type ep_type;
0261     bool enable_acc32;
0262     bool enable_burst;
0263     enum udma_tp_level channel_tpl; /* Channel Throughput Level */
0264 
0265     u32 tr_trigger_type;
0266 
0267     /* PKTDMA mapped channel */
0268     int mapped_channel_id;
0269     /* PKTDMA default tflow or rflow for mapped channel */
0270     int default_flow_id;
0271 
0272     enum dma_transfer_direction dir;
0273 };
0274 
0275 struct udma_chan {
0276     struct virt_dma_chan vc;
0277     struct dma_slave_config cfg;
0278     struct udma_dev *ud;
0279     struct device *dma_dev;
0280     struct udma_desc *desc;
0281     struct udma_desc *terminated_desc;
0282     struct udma_static_tr static_tr;
0283     char *name;
0284 
0285     struct udma_bchan *bchan;
0286     struct udma_tchan *tchan;
0287     struct udma_rchan *rchan;
0288     struct udma_rflow *rflow;
0289 
0290     bool psil_paired;
0291 
0292     int irq_num_ring;
0293     int irq_num_udma;
0294 
0295     bool cyclic;
0296     bool paused;
0297 
0298     enum udma_chan_state state;
0299     struct completion teardown_completed;
0300 
0301     struct udma_tx_drain tx_drain;
0302 
0303     u32 bcnt; /* number of bytes completed since the start of the channel */
0304 
0305     /* Channel configuration parameters */
0306     struct udma_chan_config config;
0307 
0308     /* dmapool for packet mode descriptors */
0309     bool use_dma_pool;
0310     struct dma_pool *hdesc_pool;
0311 
0312     u32 id;
0313 };
0314 
0315 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
0316 {
0317     return container_of(d, struct udma_dev, ddev);
0318 }
0319 
0320 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
0321 {
0322     return container_of(c, struct udma_chan, vc.chan);
0323 }
0324 
0325 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
0326 {
0327     return container_of(t, struct udma_desc, vd.tx);
0328 }
0329 
0330 /* Generic register access functions */
0331 static inline u32 udma_read(void __iomem *base, int reg)
0332 {
0333     return readl(base + reg);
0334 }
0335 
0336 static inline void udma_write(void __iomem *base, int reg, u32 val)
0337 {
0338     writel(val, base + reg);
0339 }
0340 
0341 static inline void udma_update_bits(void __iomem *base, int reg,
0342                     u32 mask, u32 val)
0343 {
0344     u32 tmp, orig;
0345 
0346     orig = readl(base + reg);
0347     tmp = orig & ~mask;
0348     tmp |= (val & mask);
0349 
0350     if (tmp != orig)
0351         writel(tmp, base + reg);
0352 }
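/*
 * Illustrative sketch (not part of the driver): a hypothetical caller can
 * use the read-modify-write helper above to set the PAUSE bit without
 * disturbing the other CTL bits:
 *
 *     udma_update_bits(uc->tchan->reg_rt, UDMA_CHAN_RT_CTL_REG,
 *                      UDMA_CHAN_RT_CTL_PAUSE, UDMA_CHAN_RT_CTL_PAUSE);
 *
 * The helper skips the writel() entirely when the masked bits already
 * hold the requested value, avoiding a redundant register write.
 */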
0353 
0354 /* TCHANRT */
0355 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
0356 {
0357     if (!uc->tchan)
0358         return 0;
0359     return udma_read(uc->tchan->reg_rt, reg);
0360 }
0361 
0362 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
0363 {
0364     if (!uc->tchan)
0365         return;
0366     udma_write(uc->tchan->reg_rt, reg, val);
0367 }
0368 
0369 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
0370                         u32 mask, u32 val)
0371 {
0372     if (!uc->tchan)
0373         return;
0374     udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
0375 }
0376 
0377 /* RCHANRT */
0378 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
0379 {
0380     if (!uc->rchan)
0381         return 0;
0382     return udma_read(uc->rchan->reg_rt, reg);
0383 }
0384 
0385 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
0386 {
0387     if (!uc->rchan)
0388         return;
0389     udma_write(uc->rchan->reg_rt, reg, val);
0390 }
0391 
0392 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
0393                         u32 mask, u32 val)
0394 {
0395     if (!uc->rchan)
0396         return;
0397     udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
0398 }
0399 
0400 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
0401 {
0402     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
0403 
0404     dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
0405     return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
0406                           tisci_rm->tisci_navss_dev_id,
0407                           src_thread, dst_thread);
0408 }
0409 
0410 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
0411                  u32 dst_thread)
0412 {
0413     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
0414 
0415     dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
0416     return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
0417                         tisci_rm->tisci_navss_dev_id,
0418                         src_thread, dst_thread);
0419 }
0420 
0421 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
0422 {
0423     struct device *chan_dev = &chan->dev->device;
0424 
0425     if (asel == 0) {
0426         /* No special handling for the channel */
0427         chan->dev->chan_dma_dev = false;
0428 
0429         chan_dev->dma_coherent = false;
0430         chan_dev->dma_parms = NULL;
0431     } else if (asel == 14 || asel == 15) {
0432         chan->dev->chan_dma_dev = true;
0433 
0434         chan_dev->dma_coherent = true;
0435         dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
0436         chan_dev->dma_parms = chan_dev->parent->dma_parms;
0437     } else {
0438         dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
0439 
0440         chan_dev->dma_coherent = false;
0441         chan_dev->dma_parms = NULL;
0442     }
0443 }
0444 
0445 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
0446 {
0447     int i;
0448 
0449     for (i = 0; i < tpl_map->levels; i++) {
0450         if (chan_id >= tpl_map->start_idx[i])
0451             return i;
0452     }
0453 
0454     return 0;
0455 }
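/*
 * Illustrative lookup (hypothetical numbers, not from the driver): with
 * levels = 2 and start_idx = { 8, 0 }, the loop above returns the first
 * level whose start index the channel id reaches:
 *
 *     udma_get_chan_tpl_index(&tpl_map, 9);  // 9 >= 8        -> 0
 *     udma_get_chan_tpl_index(&tpl_map, 2);  // 2 < 8, 2 >= 0 -> 1
 *
 * Lower channel ids thus resolve to the later index, which corresponds to
 * the higher throughput level (udma_get_chan_pair() below starts its
 * highest-TPL search at start_idx[levels - 1]).
 */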
0456 
0457 static void udma_reset_uchan(struct udma_chan *uc)
0458 {
0459     memset(&uc->config, 0, sizeof(uc->config));
0460     uc->config.remote_thread_id = -1;
0461     uc->config.mapped_channel_id = -1;
0462     uc->config.default_flow_id = -1;
0463     uc->state = UDMA_CHAN_IS_IDLE;
0464 }
0465 
0466 static void udma_dump_chan_stdata(struct udma_chan *uc)
0467 {
0468     struct device *dev = uc->ud->dev;
0469     u32 offset;
0470     int i;
0471 
0472     if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
0473         dev_dbg(dev, "TCHAN State data:\n");
0474         for (i = 0; i < 32; i++) {
0475             offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
0476             dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
0477                 udma_tchanrt_read(uc, offset));
0478         }
0479     }
0480 
0481     if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
0482         dev_dbg(dev, "RCHAN State data:\n");
0483         for (i = 0; i < 32; i++) {
0484             offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
0485             dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
0486                 udma_rchanrt_read(uc, offset));
0487         }
0488     }
0489 }
0490 
0491 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
0492                             int idx)
0493 {
0494     return d->hwdesc[idx].cppi5_desc_paddr;
0495 }
0496 
0497 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
0498 {
0499     return d->hwdesc[idx].cppi5_desc_vaddr;
0500 }
0501 
0502 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
0503                            dma_addr_t paddr)
0504 {
0505     struct udma_desc *d = uc->terminated_desc;
0506 
0507     if (d) {
0508         dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
0509                                    d->desc_idx);
0510 
0511         if (desc_paddr != paddr)
0512             d = NULL;
0513     }
0514 
0515     if (!d) {
0516         d = uc->desc;
0517         if (d) {
0518             dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
0519                                 d->desc_idx);
0520 
0521             if (desc_paddr != paddr)
0522                 d = NULL;
0523         }
0524     }
0525 
0526     return d;
0527 }
0528 
0529 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
0530 {
0531     if (uc->use_dma_pool) {
0532         int i;
0533 
0534         for (i = 0; i < d->hwdesc_count; i++) {
0535             if (!d->hwdesc[i].cppi5_desc_vaddr)
0536                 continue;
0537 
0538             dma_pool_free(uc->hdesc_pool,
0539                       d->hwdesc[i].cppi5_desc_vaddr,
0540                       d->hwdesc[i].cppi5_desc_paddr);
0541 
0542             d->hwdesc[i].cppi5_desc_vaddr = NULL;
0543         }
0544     } else if (d->hwdesc[0].cppi5_desc_vaddr) {
0545         dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
0546                   d->hwdesc[0].cppi5_desc_vaddr,
0547                   d->hwdesc[0].cppi5_desc_paddr);
0548 
0549         d->hwdesc[0].cppi5_desc_vaddr = NULL;
0550     }
0551 }
0552 
0553 static void udma_purge_desc_work(struct work_struct *work)
0554 {
0555     struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
0556     struct virt_dma_desc *vd, *_vd;
0557     unsigned long flags;
0558     LIST_HEAD(head);
0559 
0560     spin_lock_irqsave(&ud->lock, flags);
0561     list_splice_tail_init(&ud->desc_to_purge, &head);
0562     spin_unlock_irqrestore(&ud->lock, flags);
0563 
0564     list_for_each_entry_safe(vd, _vd, &head, node) {
0565         struct udma_chan *uc = to_udma_chan(vd->tx.chan);
0566         struct udma_desc *d = to_udma_desc(&vd->tx);
0567 
0568         udma_free_hwdesc(uc, d);
0569         list_del(&vd->node);
0570         kfree(d);
0571     }
0572 
0573     /* If more to purge, schedule the work again */
0574     if (!list_empty(&ud->desc_to_purge))
0575         schedule_work(&ud->purge_work);
0576 }
0577 
0578 static void udma_desc_free(struct virt_dma_desc *vd)
0579 {
0580     struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
0581     struct udma_chan *uc = to_udma_chan(vd->tx.chan);
0582     struct udma_desc *d = to_udma_desc(&vd->tx);
0583     unsigned long flags;
0584 
0585     if (uc->terminated_desc == d)
0586         uc->terminated_desc = NULL;
0587 
0588     if (uc->use_dma_pool) {
0589         udma_free_hwdesc(uc, d);
0590         kfree(d);
0591         return;
0592     }
0593 
0594     spin_lock_irqsave(&ud->lock, flags);
0595     list_add_tail(&vd->node, &ud->desc_to_purge);
0596     spin_unlock_irqrestore(&ud->lock, flags);
0597 
0598     schedule_work(&ud->purge_work);
0599 }
0600 
0601 static bool udma_is_chan_running(struct udma_chan *uc)
0602 {
0603     u32 trt_ctl = 0;
0604     u32 rrt_ctl = 0;
0605 
0606     if (uc->tchan)
0607         trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
0608     if (uc->rchan)
0609         rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
0610 
0611     if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
0612         return true;
0613 
0614     return false;
0615 }
0616 
0617 static bool udma_is_chan_paused(struct udma_chan *uc)
0618 {
0619     u32 val, pause_mask;
0620 
0621     switch (uc->config.dir) {
0622     case DMA_DEV_TO_MEM:
0623         val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
0624         pause_mask = UDMA_PEER_RT_EN_PAUSE;
0625         break;
0626     case DMA_MEM_TO_DEV:
0627         val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
0628         pause_mask = UDMA_PEER_RT_EN_PAUSE;
0629         break;
0630     case DMA_MEM_TO_MEM:
0631         val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
0632         pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
0633         break;
0634     default:
0635         return false;
0636     }
0637 
0638     if (val & pause_mask)
0639         return true;
0640 
0641     return false;
0642 }
0643 
0644 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
0645 {
0646     return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
0647 }
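/*
 * Note (inferred from the layout of struct udma_rx_flush): rx_flush keeps
 * two pre-built flush descriptors and the bool pkt_mode is used directly
 * as the array index, so hwdescs[0] is the TR-mode descriptor and
 * hwdescs[1] the packet-mode one.
 */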
0648 
0649 static int udma_push_to_ring(struct udma_chan *uc, int idx)
0650 {
0651     struct udma_desc *d = uc->desc;
0652     struct k3_ring *ring = NULL;
0653     dma_addr_t paddr;
0654 
0655     switch (uc->config.dir) {
0656     case DMA_DEV_TO_MEM:
0657         ring = uc->rflow->fd_ring;
0658         break;
0659     case DMA_MEM_TO_DEV:
0660     case DMA_MEM_TO_MEM:
0661         ring = uc->tchan->t_ring;
0662         break;
0663     default:
0664         return -EINVAL;
0665     }
0666 
0667     /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
0668     if (idx == -1) {
0669         paddr = udma_get_rx_flush_hwdesc_paddr(uc);
0670     } else {
0671         paddr = udma_curr_cppi5_desc_paddr(d, idx);
0672 
0673         wmb(); /* Ensure that writes are not moved over this point */
0674     }
0675 
0676     return k3_ringacc_ring_push(ring, &paddr);
0677 }
0678 
0679 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
0680 {
0681     if (uc->config.dir != DMA_DEV_TO_MEM)
0682         return false;
0683 
0684     if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
0685         return true;
0686 
0687     return false;
0688 }
0689 
0690 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
0691 {
0692     struct k3_ring *ring = NULL;
0693     int ret;
0694 
0695     switch (uc->config.dir) {
0696     case DMA_DEV_TO_MEM:
0697         ring = uc->rflow->r_ring;
0698         break;
0699     case DMA_MEM_TO_DEV:
0700     case DMA_MEM_TO_MEM:
0701         ring = uc->tchan->tc_ring;
0702         break;
0703     default:
0704         return -ENOENT;
0705     }
0706 
0707     ret = k3_ringacc_ring_pop(ring, addr);
0708     if (ret)
0709         return ret;
0710 
0711     rmb(); /* Ensure that reads are not moved before this point */
0712 
0713     /* Teardown completion */
0714     if (cppi5_desc_is_tdcm(*addr))
0715         return 0;
0716 
0717     /* Check for flush descriptor */
0718     if (udma_desc_is_rx_flush(uc, *addr))
0719         return -ENOENT;
0720 
0721     return 0;
0722 }
0723 
0724 static void udma_reset_rings(struct udma_chan *uc)
0725 {
0726     struct k3_ring *ring1 = NULL;
0727     struct k3_ring *ring2 = NULL;
0728 
0729     switch (uc->config.dir) {
0730     case DMA_DEV_TO_MEM:
0731         if (uc->rchan) {
0732             ring1 = uc->rflow->fd_ring;
0733             ring2 = uc->rflow->r_ring;
0734         }
0735         break;
0736     case DMA_MEM_TO_DEV:
0737     case DMA_MEM_TO_MEM:
0738         if (uc->tchan) {
0739             ring1 = uc->tchan->t_ring;
0740             ring2 = uc->tchan->tc_ring;
0741         }
0742         break;
0743     default:
0744         break;
0745     }
0746 
0747     if (ring1)
0748         k3_ringacc_ring_reset_dma(ring1,
0749                       k3_ringacc_ring_get_occ(ring1));
0750     if (ring2)
0751         k3_ringacc_ring_reset(ring2);
0752 
0753     /* make sure we are not leaking memory by a stalled descriptor */
0754     if (uc->terminated_desc) {
0755         udma_desc_free(&uc->terminated_desc->vd);
0756         uc->terminated_desc = NULL;
0757     }
0758 }
0759 
0760 static void udma_reset_counters(struct udma_chan *uc)
0761 {
0762     u32 val;
0763 
0764     if (uc->tchan) {
0765         val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
0766         udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
0767 
0768         val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
0769         udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
0770 
0771         val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
0772         udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
0773 
0774         if (!uc->bchan) {
0775             val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
0776             udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
0777         }
0778     }
0779 
0780     if (uc->rchan) {
0781         val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
0782         udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
0783 
0784         val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
0785         udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
0786 
0787         val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
0788         udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
0789 
0790         val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
0791         udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
0792     }
0793 
0794     uc->bcnt = 0;
0795 }
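/*
 * Note (assuming the decrement-on-write semantics of the K3 UDMAP RT
 * counter registers): a write subtracts the written value from the
 * running count, so the read-then-write-back pattern above zeroes each
 * counter without racing against concurrent hardware increments.
 */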
0796 
0797 static int udma_reset_chan(struct udma_chan *uc, bool hard)
0798 {
0799     switch (uc->config.dir) {
0800     case DMA_DEV_TO_MEM:
0801         udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
0802         udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
0803         break;
0804     case DMA_MEM_TO_DEV:
0805         udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
0806         udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
0807         break;
0808     case DMA_MEM_TO_MEM:
0809         udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
0810         udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
0811         break;
0812     default:
0813         return -EINVAL;
0814     }
0815 
0816     /* Reset all counters */
0817     udma_reset_counters(uc);
0818 
0819     /* Hard reset: re-initialize the channel to reset it */
0820     if (hard) {
0821         struct udma_chan_config ucc_backup;
0822         int ret;
0823 
0824         memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
0825         uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
0826 
0827         /* restore the channel configuration */
0828         memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
0829         ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
0830         if (ret)
0831             return ret;
0832 
0833         /*
0834          * Setting forced teardown after a forced reset helps recover
0835          * the rchan.
0836          */
0837         if (uc->config.dir == DMA_DEV_TO_MEM)
0838             udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
0839                        UDMA_CHAN_RT_CTL_EN |
0840                        UDMA_CHAN_RT_CTL_TDOWN |
0841                        UDMA_CHAN_RT_CTL_FTDOWN);
0842     }
0843     uc->state = UDMA_CHAN_IS_IDLE;
0844 
0845     return 0;
0846 }
0847 
0848 static void udma_start_desc(struct udma_chan *uc)
0849 {
0850     struct udma_chan_config *ucc = &uc->config;
0851 
0852     if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
0853         (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
0854         int i;
0855 
0856         /*
0857          * UDMA only: push all descriptors to the ring for packet mode
0858          * cyclic or RX.
0859          * PKTDMA supports pre-linked descriptors and does not support
0860          * cyclic mode.
0861          */
0862         for (i = 0; i < uc->desc->sglen; i++)
0863             udma_push_to_ring(uc, i);
0864     } else {
0865         udma_push_to_ring(uc, 0);
0866     }
0867 }
0868 
0869 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
0870 {
0871     /* Only PDMAs have staticTR */
0872     if (uc->config.ep_type == PSIL_EP_NATIVE)
0873         return false;
0874 
0875     /* Check if the staticTR configuration has changed for TX */
0876     if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
0877         return true;
0878 
0879     return false;
0880 }
0881 
0882 static int udma_start(struct udma_chan *uc)
0883 {
0884     struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
0885 
0886     if (!vd) {
0887         uc->desc = NULL;
0888         return -ENOENT;
0889     }
0890 
0891     list_del(&vd->node);
0892 
0893     uc->desc = to_udma_desc(&vd->tx);
0894 
0895     /* Channel is already running and does not need reconfiguration */
0896     if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
0897         udma_start_desc(uc);
0898         goto out;
0899     }
0900 
0901     /* Make sure that we clear the teardown bit, if it is set */
0902     udma_reset_chan(uc, false);
0903 
0904     /* Push descriptors before we start the channel */
0905     udma_start_desc(uc);
0906 
0907     switch (uc->desc->dir) {
0908     case DMA_DEV_TO_MEM:
0909         /* Config remote TR */
0910         if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
0911             u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
0912                   PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
0913             const struct udma_match_data *match_data =
0914                             uc->ud->match_data;
0915 
0916             if (uc->config.enable_acc32)
0917                 val |= PDMA_STATIC_TR_XY_ACC32;
0918             if (uc->config.enable_burst)
0919                 val |= PDMA_STATIC_TR_XY_BURST;
0920 
0921             udma_rchanrt_write(uc,
0922                        UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
0923                        val);
0924 
0925             udma_rchanrt_write(uc,
0926                 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
0927                 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
0928                          match_data->statictr_z_mask));
0929 
0930             /* save the current staticTR configuration */
0931             memcpy(&uc->static_tr, &uc->desc->static_tr,
0932                    sizeof(uc->static_tr));
0933         }
0934 
0935         udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
0936                    UDMA_CHAN_RT_CTL_EN);
0937 
0938         /* Enable remote */
0939         udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
0940                    UDMA_PEER_RT_EN_ENABLE);
0941 
0942         break;
0943     case DMA_MEM_TO_DEV:
0944         /* Config remote TR */
0945         if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
0946             u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
0947                   PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
0948 
0949             if (uc->config.enable_acc32)
0950                 val |= PDMA_STATIC_TR_XY_ACC32;
0951             if (uc->config.enable_burst)
0952                 val |= PDMA_STATIC_TR_XY_BURST;
0953 
0954             udma_tchanrt_write(uc,
0955                        UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
0956                        val);
0957 
0958             /* save the current staticTR configuration */
0959             memcpy(&uc->static_tr, &uc->desc->static_tr,
0960                    sizeof(uc->static_tr));
0961         }
0962 
0963         /* Enable remote */
0964         udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
0965                    UDMA_PEER_RT_EN_ENABLE);
0966 
0967         udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
0968                    UDMA_CHAN_RT_CTL_EN);
0969 
0970         break;
0971     case DMA_MEM_TO_MEM:
0972         udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
0973                    UDMA_CHAN_RT_CTL_EN);
0974         udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
0975                    UDMA_CHAN_RT_CTL_EN);
0976 
0977         break;
0978     default:
0979         return -EINVAL;
0980     }
0981 
0982     uc->state = UDMA_CHAN_IS_ACTIVE;
0983 out:
0984 
0985     return 0;
0986 }
0987 
0988 static int udma_stop(struct udma_chan *uc)
0989 {
0990     enum udma_chan_state old_state = uc->state;
0991 
0992     uc->state = UDMA_CHAN_IS_TERMINATING;
0993     reinit_completion(&uc->teardown_completed);
0994 
0995     switch (uc->config.dir) {
0996     case DMA_DEV_TO_MEM:
0997         if (!uc->cyclic && !uc->desc)
0998             udma_push_to_ring(uc, -1);
0999 
1000         udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1001                    UDMA_PEER_RT_EN_ENABLE |
1002                    UDMA_PEER_RT_EN_TEARDOWN);
1003         break;
1004     case DMA_MEM_TO_DEV:
1005         udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1006                    UDMA_PEER_RT_EN_ENABLE |
1007                    UDMA_PEER_RT_EN_FLUSH);
1008         udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1009                    UDMA_CHAN_RT_CTL_EN |
1010                    UDMA_CHAN_RT_CTL_TDOWN);
1011         break;
1012     case DMA_MEM_TO_MEM:
1013         udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1014                    UDMA_CHAN_RT_CTL_EN |
1015                    UDMA_CHAN_RT_CTL_TDOWN);
1016         break;
1017     default:
1018         uc->state = old_state;
1019         complete_all(&uc->teardown_completed);
1020         return -EINVAL;
1021     }
1022 
1023     return 0;
1024 }
1025 
1026 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
1027 {
1028     struct udma_desc *d = uc->desc;
1029     struct cppi5_host_desc_t *h_desc;
1030 
1031     h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
1032     cppi5_hdesc_reset_to_original(h_desc);
1033     udma_push_to_ring(uc, d->desc_idx);
1034     d->desc_idx = (d->desc_idx + 1) % d->sglen;
1035 }
1036 
1037 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
1038 {
1039     struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
1040 
1041     memcpy(d->metadata, h_desc->epib, d->metadata_size);
1042 }
1043 
1044 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
1045 {
1046     u32 peer_bcnt, bcnt;
1047 
1048     /* Only TX towards PDMA is affected */
1049     if (uc->config.ep_type == PSIL_EP_NATIVE ||
1050         uc->config.dir != DMA_MEM_TO_DEV)
1051         return true;
1052 
1053     peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
1054     bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
1055 
1056     /* Transfer is incomplete, store current residue and time stamp */
1057     if (peer_bcnt < bcnt) {
1058         uc->tx_drain.residue = bcnt - peer_bcnt;
1059         uc->tx_drain.tstamp = ktime_get();
1060         return false;
1061     }
1062 
1063     return true;
1064 }
1065 
1066 static void udma_check_tx_completion(struct work_struct *work)
1067 {
1068     struct udma_chan *uc = container_of(work, typeof(*uc),
1069                         tx_drain.work.work);
1070     bool desc_done = true;
1071     u32 residue_diff;
1072     ktime_t time_diff;
1073     unsigned long delay;
1074 
1075     while (1) {
1076         if (uc->desc) {
1077             /* Get previous residue and time stamp */
1078             residue_diff = uc->tx_drain.residue;
1079             time_diff = uc->tx_drain.tstamp;
1080             /*
1081              * Get current residue and time stamp or see if
1082              * transfer is complete
1083              */
1084             desc_done = udma_is_desc_really_done(uc, uc->desc);
1085         }
1086 
1087         if (!desc_done) {
1088             /*
1089              * Find the time delta and residue delta w.r.t
1090              * previous poll
1091              */
1092             time_diff = ktime_sub(uc->tx_drain.tstamp,
1093                           time_diff) + 1;
1094             residue_diff -= uc->tx_drain.residue;
1095             if (residue_diff) {
1096                 /*
1097                  * Try to guess when we should check
1098                  * next time by calculating rate at
1099                  * which data is being drained at the
1100                  * peer device
1101                  */
1102                 delay = (time_diff / residue_diff) *
1103                     uc->tx_drain.residue;
1104             } else {
1105                 /* No progress, check again in 1 second */
1106                 schedule_delayed_work(&uc->tx_drain.work, HZ);
1107                 break;
1108             }
1109 
1110             usleep_range(ktime_to_us(delay),
1111                      ktime_to_us(delay) + 10);
1112             continue;
1113         }
1114 
1115         if (uc->desc) {
1116             struct udma_desc *d = uc->desc;
1117 
1118             uc->bcnt += d->residue;
1119             udma_start(uc);
1120             vchan_cookie_complete(&d->vd);
1121             break;
1122         }
1123 
1124         break;
1125     }
1126 }
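/*
 * Illustrative arithmetic for the polling heuristic above (hypothetical
 * numbers): if the previous poll saw a residue of 768 bytes and the
 * current poll sees 256 bytes roughly 1,024,000 ns later, then
 *
 *     residue_diff = 768 - 256 = 512 bytes drained
 *     rate         = time_diff / residue_diff ~= 2000 ns/byte
 *     delay        = 2000 ns/byte * 256 bytes = 512,000 ns (~512 us)
 *
 * so the worker sleeps for about the time the peer needs to drain the
 * remaining bytes before it polls again.
 */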
1127 
1128 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1129 {
1130     struct udma_chan *uc = data;
1131     struct udma_desc *d;
1132     dma_addr_t paddr = 0;
1133 
1134     if (udma_pop_from_ring(uc, &paddr) || !paddr)
1135         return IRQ_HANDLED;
1136 
1137     spin_lock(&uc->vc.lock);
1138 
1139     /* Teardown completion message */
1140     if (cppi5_desc_is_tdcm(paddr)) {
1141         complete_all(&uc->teardown_completed);
1142 
1143         if (uc->terminated_desc) {
1144             udma_desc_free(&uc->terminated_desc->vd);
1145             uc->terminated_desc = NULL;
1146         }
1147 
1148         if (!uc->desc)
1149             udma_start(uc);
1150 
1151         goto out;
1152     }
1153 
1154     d = udma_udma_desc_from_paddr(uc, paddr);
1155 
1156     if (d) {
1157         dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1158                                    d->desc_idx);
1159         if (desc_paddr != paddr) {
1160             dev_err(uc->ud->dev, "not matching descriptors!\n");
1161             goto out;
1162         }
1163 
1164         if (d == uc->desc) {
1165             /* active descriptor */
1166             if (uc->cyclic) {
1167                 udma_cyclic_packet_elapsed(uc);
1168                 vchan_cyclic_callback(&d->vd);
1169             } else {
1170                 if (udma_is_desc_really_done(uc, d)) {
1171                     uc->bcnt += d->residue;
1172                     udma_start(uc);
1173                     vchan_cookie_complete(&d->vd);
1174                 } else {
1175                     schedule_delayed_work(&uc->tx_drain.work,
1176                                   0);
1177                 }
1178             }
1179         } else {
1180             /*
1181              * terminated descriptor, mark the descriptor as
1182              * completed to update the channel's cookie marker
1183              */
1184             dma_cookie_complete(&d->vd.tx);
1185         }
1186     }
1187 out:
1188     spin_unlock(&uc->vc.lock);
1189 
1190     return IRQ_HANDLED;
1191 }
1192 
1193 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1194 {
1195     struct udma_chan *uc = data;
1196     struct udma_desc *d;
1197 
1198     spin_lock(&uc->vc.lock);
1199     d = uc->desc;
1200     if (d) {
1201         d->tr_idx = (d->tr_idx + 1) % d->sglen;
1202 
1203         if (uc->cyclic) {
1204             vchan_cyclic_callback(&d->vd);
1205         } else {
1206             /* TODO: figure out the real amount of data */
1207             uc->bcnt += d->residue;
1208             udma_start(uc);
1209             vchan_cookie_complete(&d->vd);
1210         }
1211     }
1212 
1213     spin_unlock(&uc->vc.lock);
1214 
1215     return IRQ_HANDLED;
1216 }
1217 
1218 /**
1219  * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1220  * @ud: UDMA device
1221  * @from: Start the search from this flow id number
1222  * @cnt: Number of consecutive flow ids to allocate
1223  *
1224  * Allocate a range of RX flow ids for future use. Those flows can be requested
1225  * only by explicit flow id number. If @from is set to -1 it will try to find
1226  * the first free range. If @from is a positive value it will force allocation
1227  * only of the specified range of flows.
1228  *
1229  * Returns -ENOMEM if a free range can't be found.
1230  * -EEXIST if the requested range is busy.
1231  * -EINVAL if wrong input values are passed.
1232  * Returns flow id on success.
1233  */
1234 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1235 {
1236     int start, tmp_from;
1237     DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1238 
1239     tmp_from = from;
1240     if (tmp_from < 0)
1241         tmp_from = ud->rchan_cnt;
1242     /* default flows can't be allocated; they are accessible only by id */
1243     if (tmp_from < ud->rchan_cnt)
1244         return -EINVAL;
1245 
1246     if (tmp_from + cnt > ud->rflow_cnt)
1247         return -EINVAL;
1248 
1249     bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1250           ud->rflow_cnt);
1251 
1252     start = bitmap_find_next_zero_area(tmp,
1253                        ud->rflow_cnt,
1254                        tmp_from, cnt, 0);
1255     if (start >= ud->rflow_cnt)
1256         return -ENOMEM;
1257 
1258     if (from >= 0 && start != from)
1259         return -EEXIST;
1260 
1261     bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1262     return start;
1263 }
1264 
1265 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1266 {
1267     if (from < ud->rchan_cnt)
1268         return -EINVAL;
1269     if (from + cnt > ud->rflow_cnt)
1270         return -EINVAL;
1271 
1272     bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1273     return 0;
1274 }
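/*
 * Illustrative usage sketch (hypothetical counts, not from the driver):
 * with rchan_cnt = 32 and rflow_cnt = 96, flows 0..31 are the default
 * flows and only 32..95 can be allocated as GP flows. Reserving 8 GP
 * flows at the first free position and releasing them again:
 *
 *     int start = __udma_alloc_gp_rflow_range(ud, -1, 8);
 *     if (start < 0)
 *             return start;  // e.g. -ENOMEM if no free range
 *     ...
 *     __udma_free_gp_rflow_range(ud, start, 8);
 */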
1275 
1276 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1277 {
1278     /*
1279      * An attempt to request an rflow by ID can be made for any rflow
1280      * that is not in use, with the assumption that the caller knows
1281      * what it is doing. TI-SCI FW will perform an additional permission
1282      * check anyway, so it's safe.
1283      */
1284 
1285     if (id < 0 || id >= ud->rflow_cnt)
1286         return ERR_PTR(-ENOENT);
1287 
1288     if (test_bit(id, ud->rflow_in_use))
1289         return ERR_PTR(-ENOENT);
1290 
1291     if (ud->rflow_gp_map) {
1292         /* GP rflow has to be allocated first */
1293         if (!test_bit(id, ud->rflow_gp_map) &&
1294             !test_bit(id, ud->rflow_gp_map_allocated))
1295             return ERR_PTR(-EINVAL);
1296     }
1297 
1298     dev_dbg(ud->dev, "get rflow%d\n", id);
1299     set_bit(id, ud->rflow_in_use);
1300     return &ud->rflows[id];
1301 }
1302 
1303 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1304 {
1305     if (!test_bit(rflow->id, ud->rflow_in_use)) {
1306         dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1307         return;
1308     }
1309 
1310     dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1311     clear_bit(rflow->id, ud->rflow_in_use);
1312 }
1313 
1314 #define UDMA_RESERVE_RESOURCE(res)                  \
1315 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1316                            enum udma_tp_level tpl,  \
1317                            int id)          \
1318 {                                   \
1319     if (id >= 0) {                          \
1320         if (test_bit(id, ud->res##_map)) {          \
1321             dev_err(ud->dev, "res##%d is in use\n", id);    \
1322             return ERR_PTR(-ENOENT);            \
1323         }                           \
1324     } else {                            \
1325         int start;                      \
1326                                     \
1327         if (tpl >= ud->res##_tpl.levels)            \
1328             tpl = ud->res##_tpl.levels - 1;         \
1329                                     \
1330         start = ud->res##_tpl.start_idx[tpl];           \
1331                                     \
1332         id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,   \
1333                     start);             \
1334         if (id == ud->res##_cnt) {              \
1335             return ERR_PTR(-ENOENT);            \
1336         }                           \
1337     }                               \
1338                                     \
1339     set_bit(id, ud->res##_map);                 \
1340     return &ud->res##s[id];                     \
1341 }
1342 
1343 UDMA_RESERVE_RESOURCE(bchan);
1344 UDMA_RESERVE_RESOURCE(tchan);
1345 UDMA_RESERVE_RESOURCE(rchan);
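/*
 * Illustrative expansion sketch: UDMA_RESERVE_RESOURCE(tchan) above
 * generates a reservation helper of this shape (simplified):
 *
 *     static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *                                                    enum udma_tp_level tpl,
 *                                                    int id)
 *     {
 *             // id >= 0: claim exactly that tchan if free, else -ENOENT
 *             // id < 0: clamp tpl and search tchan_map from the level's
 *             //         start index for the first free channel
 *             ...
 *             set_bit(id, ud->tchan_map);
 *             return &ud->tchans[id];
 *     }
 *
 * and likewise for bchan and rchan.
 */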
1346 
1347 static int bcdma_get_bchan(struct udma_chan *uc)
1348 {
1349     struct udma_dev *ud = uc->ud;
1350     enum udma_tp_level tpl;
1351     int ret;
1352 
1353     if (uc->bchan) {
1354         dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1355             uc->id, uc->bchan->id);
1356         return 0;
1357     }
1358 
1359     /*
1360      * Use normal channels for peripherals, and highest TPL channel for
1361      * mem2mem
1362      */
1363     if (uc->config.tr_trigger_type)
1364         tpl = 0;
1365     else
1366         tpl = ud->bchan_tpl.levels - 1;
1367 
1368     uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1369     if (IS_ERR(uc->bchan)) {
1370         ret = PTR_ERR(uc->bchan);
1371         uc->bchan = NULL;
1372         return ret;
1373     }
1374 
1375     uc->tchan = uc->bchan;
1376 
1377     return 0;
1378 }
1379 
1380 static int udma_get_tchan(struct udma_chan *uc)
1381 {
1382     struct udma_dev *ud = uc->ud;
1383     int ret;
1384 
1385     if (uc->tchan) {
1386         dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1387             uc->id, uc->tchan->id);
1388         return 0;
1389     }
1390 
1391     /*
1392      * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1393      * For PKTDMA mapped channels it is configured to a channel which must
1394      * be used to service the peripheral.
1395      */
1396     uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1397                      uc->config.mapped_channel_id);
1398     if (IS_ERR(uc->tchan)) {
1399         ret = PTR_ERR(uc->tchan);
1400         uc->tchan = NULL;
1401         return ret;
1402     }
1403 
1404     if (ud->tflow_cnt) {
1405         int tflow_id;
1406 
1407         /* Only PKTDMA has support for tx flows */
1408         if (uc->config.default_flow_id >= 0)
1409             tflow_id = uc->config.default_flow_id;
1410         else
1411             tflow_id = uc->tchan->id;
1412 
1413         if (test_bit(tflow_id, ud->tflow_map)) {
1414             dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1415             clear_bit(uc->tchan->id, ud->tchan_map);
1416             uc->tchan = NULL;
1417             return -ENOENT;
1418         }
1419 
1420         uc->tchan->tflow_id = tflow_id;
1421         set_bit(tflow_id, ud->tflow_map);
1422     } else {
1423         uc->tchan->tflow_id = -1;
1424     }
1425 
1426     return 0;
1427 }
1428 
1429 static int udma_get_rchan(struct udma_chan *uc)
1430 {
1431     struct udma_dev *ud = uc->ud;
1432     int ret;
1433 
1434     if (uc->rchan) {
1435         dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1436             uc->id, uc->rchan->id);
1437         return 0;
1438     }
1439 
1440     /*
1441      * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1442      * For PKTDMA mapped channels it is configured to a channel which must
1443      * be used to service the peripheral.
1444      */
1445     uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1446                      uc->config.mapped_channel_id);
1447     if (IS_ERR(uc->rchan)) {
1448         ret = PTR_ERR(uc->rchan);
1449         uc->rchan = NULL;
1450         return ret;
1451     }
1452 
1453     return 0;
1454 }
1455 
1456 static int udma_get_chan_pair(struct udma_chan *uc)
1457 {
1458     struct udma_dev *ud = uc->ud;
1459     int chan_id, end;
1460 
1461     if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1462         dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1463              uc->id, uc->tchan->id);
1464         return 0;
1465     }
1466 
1467     if (uc->tchan) {
1468         dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1469             uc->id, uc->tchan->id);
1470         return -EBUSY;
1471     } else if (uc->rchan) {
1472         dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1473             uc->id, uc->rchan->id);
1474         return -EBUSY;
1475     }
1476 
1477     /* Can be optimized, but let's have it like this for now */
1478     end = min(ud->tchan_cnt, ud->rchan_cnt);
1479     /*
1480      * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1481      * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1482      */
1483     chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1484     for (; chan_id < end; chan_id++) {
1485         if (!test_bit(chan_id, ud->tchan_map) &&
1486             !test_bit(chan_id, ud->rchan_map))
1487             break;
1488     }
1489 
1490     if (chan_id == end)
1491         return -ENOENT;
1492 
1493     set_bit(chan_id, ud->tchan_map);
1494     set_bit(chan_id, ud->rchan_map);
1495     uc->tchan = &ud->tchans[chan_id];
1496     uc->rchan = &ud->rchans[chan_id];
1497 
1498     /* UDMA does not use tx flows */
1499     uc->tchan->tflow_id = -1;
1500 
1501     return 0;
1502 }
1503 
1504 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1505 {
1506     struct udma_dev *ud = uc->ud;
1507     int ret;
1508 
1509     if (!uc->rchan) {
1510         dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1511         return -EINVAL;
1512     }
1513 
1514     if (uc->rflow) {
1515         dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1516             uc->id, uc->rflow->id);
1517         return 0;
1518     }
1519 
1520     uc->rflow = __udma_get_rflow(ud, flow_id);
1521     if (IS_ERR(uc->rflow)) {
1522         ret = PTR_ERR(uc->rflow);
1523         uc->rflow = NULL;
1524         return ret;
1525     }
1526 
1527     return 0;
1528 }
1529 
1530 static void bcdma_put_bchan(struct udma_chan *uc)
1531 {
1532     struct udma_dev *ud = uc->ud;
1533 
1534     if (uc->bchan) {
1535         dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1536             uc->bchan->id);
1537         clear_bit(uc->bchan->id, ud->bchan_map);
1538         uc->bchan = NULL;
1539         uc->tchan = NULL;
1540     }
1541 }
1542 
1543 static void udma_put_rchan(struct udma_chan *uc)
1544 {
1545     struct udma_dev *ud = uc->ud;
1546 
1547     if (uc->rchan) {
1548         dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1549             uc->rchan->id);
1550         clear_bit(uc->rchan->id, ud->rchan_map);
1551         uc->rchan = NULL;
1552     }
1553 }
1554 
1555 static void udma_put_tchan(struct udma_chan *uc)
1556 {
1557     struct udma_dev *ud = uc->ud;
1558 
1559     if (uc->tchan) {
1560         dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1561             uc->tchan->id);
1562         clear_bit(uc->tchan->id, ud->tchan_map);
1563 
1564         if (uc->tchan->tflow_id >= 0)
1565             clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1566 
1567         uc->tchan = NULL;
1568     }
1569 }
1570 
1571 static void udma_put_rflow(struct udma_chan *uc)
1572 {
1573     struct udma_dev *ud = uc->ud;
1574 
1575     if (uc->rflow) {
1576         dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1577             uc->rflow->id);
1578         __udma_put_rflow(ud, uc->rflow);
1579         uc->rflow = NULL;
1580     }
1581 }
1582 
1583 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1584 {
1585     if (!uc->bchan)
1586         return;
1587 
1588     k3_ringacc_ring_free(uc->bchan->tc_ring);
1589     k3_ringacc_ring_free(uc->bchan->t_ring);
1590     uc->bchan->tc_ring = NULL;
1591     uc->bchan->t_ring = NULL;
1592     k3_configure_chan_coherency(&uc->vc.chan, 0);
1593 
1594     bcdma_put_bchan(uc);
1595 }
1596 
1597 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1598 {
1599     struct k3_ring_cfg ring_cfg;
1600     struct udma_dev *ud = uc->ud;
1601     int ret;
1602 
1603     ret = bcdma_get_bchan(uc);
1604     if (ret)
1605         return ret;
1606 
1607     ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1608                         &uc->bchan->t_ring,
1609                         &uc->bchan->tc_ring);
1610     if (ret) {
1611         ret = -EBUSY;
1612         goto err_ring;
1613     }
1614 
1615     memset(&ring_cfg, 0, sizeof(ring_cfg));
1616     ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1617     ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1618     ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1619 
1620     k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1621     ring_cfg.asel = ud->asel;
1622     ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1623 
1624     ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1625     if (ret)
1626         goto err_ringcfg;
1627 
1628     return 0;
1629 
1630 err_ringcfg:
1631     k3_ringacc_ring_free(uc->bchan->tc_ring);
1632     uc->bchan->tc_ring = NULL;
1633     k3_ringacc_ring_free(uc->bchan->t_ring);
1634     uc->bchan->t_ring = NULL;
1635     k3_configure_chan_coherency(&uc->vc.chan, 0);
1636 err_ring:
1637     bcdma_put_bchan(uc);
1638 
1639     return ret;
1640 }
1641 
1642 static void udma_free_tx_resources(struct udma_chan *uc)
1643 {
1644     if (!uc->tchan)
1645         return;
1646 
1647     k3_ringacc_ring_free(uc->tchan->t_ring);
1648     k3_ringacc_ring_free(uc->tchan->tc_ring);
1649     uc->tchan->t_ring = NULL;
1650     uc->tchan->tc_ring = NULL;
1651 
1652     udma_put_tchan(uc);
1653 }
1654 
1655 static int udma_alloc_tx_resources(struct udma_chan *uc)
1656 {
1657     struct k3_ring_cfg ring_cfg;
1658     struct udma_dev *ud = uc->ud;
1659     struct udma_tchan *tchan;
1660     int ring_idx, ret;
1661 
1662     ret = udma_get_tchan(uc);
1663     if (ret)
1664         return ret;
1665 
1666     tchan = uc->tchan;
1667     if (tchan->tflow_id >= 0)
1668         ring_idx = tchan->tflow_id;
1669     else
1670         ring_idx = ud->bchan_cnt + tchan->id;
1671 
1672     ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1673                         &tchan->t_ring,
1674                         &tchan->tc_ring);
1675     if (ret) {
1676         ret = -EBUSY;
1677         goto err_ring;
1678     }
1679 
1680     memset(&ring_cfg, 0, sizeof(ring_cfg));
1681     ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1682     ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1683     if (ud->match_data->type == DMA_TYPE_UDMA) {
1684         ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1685     } else {
1686         ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1687 
1688         k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1689         ring_cfg.asel = uc->config.asel;
1690         ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1691     }
1692 
1693     ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1694     ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1695 
1696     if (ret)
1697         goto err_ringcfg;
1698 
1699     return 0;
1700 
1701 err_ringcfg:
1702     k3_ringacc_ring_free(uc->tchan->tc_ring);
1703     uc->tchan->tc_ring = NULL;
1704     k3_ringacc_ring_free(uc->tchan->t_ring);
1705     uc->tchan->t_ring = NULL;
1706 err_ring:
1707     udma_put_tchan(uc);
1708 
1709     return ret;
1710 }
1711 
1712 static void udma_free_rx_resources(struct udma_chan *uc)
1713 {
1714     if (!uc->rchan)
1715         return;
1716 
1717     if (uc->rflow) {
1718         struct udma_rflow *rflow = uc->rflow;
1719 
1720         k3_ringacc_ring_free(rflow->fd_ring);
1721         k3_ringacc_ring_free(rflow->r_ring);
1722         rflow->fd_ring = NULL;
1723         rflow->r_ring = NULL;
1724 
1725         udma_put_rflow(uc);
1726     }
1727 
1728     udma_put_rchan(uc);
1729 }
1730 
1731 static int udma_alloc_rx_resources(struct udma_chan *uc)
1732 {
1733     struct udma_dev *ud = uc->ud;
1734     struct k3_ring_cfg ring_cfg;
1735     struct udma_rflow *rflow;
1736     int fd_ring_id;
1737     int ret;
1738 
1739     ret = udma_get_rchan(uc);
1740     if (ret)
1741         return ret;
1742 
1743     /* For MEM_TO_MEM we don't need rflow or rings */
1744     if (uc->config.dir == DMA_MEM_TO_MEM)
1745         return 0;
1746 
1747     if (uc->config.default_flow_id >= 0)
1748         ret = udma_get_rflow(uc, uc->config.default_flow_id);
1749     else
1750         ret = udma_get_rflow(uc, uc->rchan->id);
1751 
1752     if (ret) {
1753         ret = -EBUSY;
1754         goto err_rflow;
1755     }
1756 
1757     rflow = uc->rflow;
1758     if (ud->tflow_cnt)
1759         fd_ring_id = ud->tflow_cnt + rflow->id;
1760     else
1761         fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1762                  uc->rchan->id;
1763 
1764     ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1765                         &rflow->fd_ring, &rflow->r_ring);
1766     if (ret) {
1767         ret = -EBUSY;
1768         goto err_ring;
1769     }
1770 
1771     memset(&ring_cfg, 0, sizeof(ring_cfg));
1772 
1773     ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1774     if (ud->match_data->type == DMA_TYPE_UDMA) {
1775         if (uc->config.pkt_mode)
1776             ring_cfg.size = SG_MAX_SEGMENTS;
1777         else
1778             ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1779 
1780         ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1781     } else {
1782         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1783         ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1784 
1785         k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1786         ring_cfg.asel = uc->config.asel;
1787         ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1788     }
1789 
1790     ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1791 
1792     ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1793     ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1794 
1795     if (ret)
1796         goto err_ringcfg;
1797 
1798     return 0;
1799 
1800 err_ringcfg:
1801     k3_ringacc_ring_free(rflow->r_ring);
1802     rflow->r_ring = NULL;
1803     k3_ringacc_ring_free(rflow->fd_ring);
1804     rflow->fd_ring = NULL;
1805 err_ring:
1806     udma_put_rflow(uc);
1807 err_rflow:
1808     udma_put_rchan(uc);
1809 
1810     return ret;
1811 }
1812 
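     /*
      * The TISCI resource manager only applies request fields whose VALID
      * bit is set in valid_params, so each channel type lists exactly the
      * set of parameters it configures.
      */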
1813 #define TISCI_BCDMA_BCHAN_VALID_PARAMS (            \
1814     TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |   \
1815     TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1816 
1817 #define TISCI_BCDMA_TCHAN_VALID_PARAMS (            \
1818     TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |   \
1819     TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1820 
1821 #define TISCI_BCDMA_RCHAN_VALID_PARAMS (            \
1822     TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1823 
1824 #define TISCI_UDMA_TCHAN_VALID_PARAMS (             \
1825     TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |   \
1826     TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |  \
1827     TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |    \
1828     TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |      \
1829     TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |  \
1830     TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |     \
1831     TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |        \
1832     TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1833 
1834 #define TISCI_UDMA_RCHAN_VALID_PARAMS (             \
1835     TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |   \
1836     TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |     \
1837     TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |        \
1838     TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |      \
1839     TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |    \
1840     TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1841     TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |    \
1842     TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |  \
1843     TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1844 
1845 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1846 {
1847     struct udma_dev *ud = uc->ud;
1848     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1849     const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1850     struct udma_tchan *tchan = uc->tchan;
1851     struct udma_rchan *rchan = uc->rchan;
1852     u8 burst_size = 0;
1853     int ret;
1854     u8 tpl;
1855 
1856     /* Non-synchronized - mem-to-mem type of transfer */
1857     int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1858     struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1859     struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1860 
1861     if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1862         tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1863 
1864         burst_size = ud->match_data->burst_size[tpl];
1865     }
1866 
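         /*
          * TISCI expects the descriptor fetch size in 32-bit words, hence
          * the byte sizes below are shifted right by two.
          */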
1867     req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1868     req_tx.nav_id = tisci_rm->tisci_dev_id;
1869     req_tx.index = tchan->id;
1870     req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1871     req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1872     req_tx.txcq_qnum = tc_ring;
1873     req_tx.tx_atype = ud->atype;
1874     if (burst_size) {
1875         req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1876         req_tx.tx_burst_size = burst_size;
1877     }
1878 
1879     ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1880     if (ret) {
1881         dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1882         return ret;
1883     }
1884 
1885     req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1886     req_rx.nav_id = tisci_rm->tisci_dev_id;
1887     req_rx.index = rchan->id;
1888     req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1889     req_rx.rxcq_qnum = tc_ring;
1890     req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1891     req_rx.rx_atype = ud->atype;
1892     if (burst_size) {
1893         req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1894         req_rx.rx_burst_size = burst_size;
1895     }
1896 
1897     ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1898     if (ret)
1899         dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1900 
1901     return ret;
1902 }
1903 
1904 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1905 {
1906     struct udma_dev *ud = uc->ud;
1907     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1908     const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1909     struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1910     struct udma_bchan *bchan = uc->bchan;
1911     u8 burst_size = 0;
1912     int ret;
1913     u8 tpl;
1914 
1915     if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1916         tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1917 
1918         burst_size = ud->match_data->burst_size[tpl];
1919     }
1920 
1921     req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1922     req_tx.nav_id = tisci_rm->tisci_dev_id;
1923     req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1924     req_tx.index = bchan->id;
1925     if (burst_size) {
1926         req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1927         req_tx.tx_burst_size = burst_size;
1928     }
1929 
1930     ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1931     if (ret)
1932         dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1933 
1934     return ret;
1935 }
1936 
1937 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1938 {
1939     struct udma_dev *ud = uc->ud;
1940     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1941     const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1942     struct udma_tchan *tchan = uc->tchan;
1943     int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1944     struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1945     u32 mode, fetch_size;
1946     int ret;
1947 
1948     if (uc->config.pkt_mode) {
1949         mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1950         fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1951                            uc->config.psd_size, 0);
1952     } else {
1953         mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1954         fetch_size = sizeof(struct cppi5_desc_hdr_t);
1955     }
1956 
1957     req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1958     req_tx.nav_id = tisci_rm->tisci_dev_id;
1959     req_tx.index = tchan->id;
1960     req_tx.tx_chan_type = mode;
1961     req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1962     req_tx.tx_fetch_size = fetch_size >> 2;
1963     req_tx.txcq_qnum = tc_ring;
1964     req_tx.tx_atype = uc->config.atype;
1965     if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1966         ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1967         /* wait for peer to complete the teardown for PDMAs */
1968         req_tx.valid_params |=
1969                 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1970         req_tx.tx_tdtype = 1;
1971     }
1972 
1973     ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1974     if (ret)
1975         dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1976 
1977     return ret;
1978 }
1979 
1980 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
1981 {
1982     struct udma_dev *ud = uc->ud;
1983     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1984     const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1985     struct udma_tchan *tchan = uc->tchan;
1986     struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1987     int ret;
1988 
1989     req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
1990     req_tx.nav_id = tisci_rm->tisci_dev_id;
1991     req_tx.index = tchan->id;
1992     req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1993     if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1994         /* wait for peer to complete the teardown for PDMAs */
1995         req_tx.valid_params |=
1996                 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1997         req_tx.tx_tdtype = 1;
1998     }
1999 
2000     ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2001     if (ret)
2002         dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2003 
2004     return ret;
2005 }
2006 
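     /* PKTDMA tx channels take the same minimal TISCI configuration as BCDMA */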
2007 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2008 
2009 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
2010 {
2011     struct udma_dev *ud = uc->ud;
2012     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2013     const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2014     struct udma_rchan *rchan = uc->rchan;
2015     int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
2016     int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2017     struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2018     struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2019     u32 mode, fetch_size;
2020     int ret;
2021 
2022     if (uc->config.pkt_mode) {
2023         mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2024         fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2025                            uc->config.psd_size, 0);
2026     } else {
2027         mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2028         fetch_size = sizeof(struct cppi5_desc_hdr_t);
2029     }
2030 
2031     req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2032     req_rx.nav_id = tisci_rm->tisci_dev_id;
2033     req_rx.index = rchan->id;
2034     req_rx.rx_fetch_size = fetch_size >> 2;
2035     req_rx.rxcq_qnum = rx_ring;
2036     req_rx.rx_chan_type = mode;
2037     req_rx.rx_atype = uc->config.atype;
2038 
2039     ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2040     if (ret) {
2041         dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2042         return ret;
2043     }
2044 
2045     flow_req.valid_params =
2046         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2047         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2048         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2049         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2050         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2051         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2052         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2053         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2054         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2055         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2056         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2057         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2058         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2059 
2060     flow_req.nav_id = tisci_rm->tisci_dev_id;
2061     flow_req.flow_index = rchan->id;
2062 
2063     if (uc->config.needs_epib)
2064         flow_req.rx_einfo_present = 1;
2065     else
2066         flow_req.rx_einfo_present = 0;
2067     if (uc->config.psd_size)
2068         flow_req.rx_psinfo_present = 1;
2069     else
2070         flow_req.rx_psinfo_present = 0;
2071     flow_req.rx_error_handling = 1;
2072     flow_req.rx_dest_qnum = rx_ring;
2073     flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2074     flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2075     flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2076     flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2077     flow_req.rx_fdq0_sz0_qnum = fd_ring;
2078     flow_req.rx_fdq1_qnum = fd_ring;
2079     flow_req.rx_fdq2_qnum = fd_ring;
2080     flow_req.rx_fdq3_qnum = fd_ring;
2081 
2082     ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2083 
2084     if (ret)
2085         dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2086 
2087     return ret;
2088 }
2089 
2090 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2091 {
2092     struct udma_dev *ud = uc->ud;
2093     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2094     const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2095     struct udma_rchan *rchan = uc->rchan;
2096     struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2097     int ret;
2098 
2099     req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2100     req_rx.nav_id = tisci_rm->tisci_dev_id;
2101     req_rx.index = rchan->id;
2102 
2103     ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2104     if (ret)
2105         dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2106 
2107     return ret;
2108 }
2109 
2110 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2111 {
2112     struct udma_dev *ud = uc->ud;
2113     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2114     const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2115     struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2116     struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2117     int ret;
2118 
2119     req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2120     req_rx.nav_id = tisci_rm->tisci_dev_id;
2121     req_rx.index = uc->rchan->id;
2122 
2123     ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2124     if (ret) {
2125         dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2126         return ret;
2127     }
2128 
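         /*
          * PKTDMA flows are statically mapped to the channel, so only the
          * descriptor formatting and error handling options are configured
          * for the flow.
          */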
2129     flow_req.valid_params =
2130         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2131         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2132         TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2133 
2134     flow_req.nav_id = tisci_rm->tisci_dev_id;
2135     flow_req.flow_index = uc->rflow->id;
2136 
2137     if (uc->config.needs_epib)
2138         flow_req.rx_einfo_present = 1;
2139     else
2140         flow_req.rx_einfo_present = 0;
2141     if (uc->config.psd_size)
2142         flow_req.rx_psinfo_present = 1;
2143     else
2144         flow_req.rx_psinfo_present = 0;
2145     flow_req.rx_error_handling = 1;
2146 
2147     ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2148 
2149     if (ret)
2150         dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2151             ret);
2152 
2153     return ret;
2154 }
2155 
2156 static int udma_alloc_chan_resources(struct dma_chan *chan)
2157 {
2158     struct udma_chan *uc = to_udma_chan(chan);
2159     struct udma_dev *ud = to_udma_dev(chan->device);
2160     const struct udma_soc_data *soc_data = ud->soc_data;
2161     struct k3_ring *irq_ring;
2162     u32 irq_udma_idx;
2163     int ret;
2164 
2165     uc->dma_dev = ud->dev;
2166 
2167     if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2168         uc->use_dma_pool = true;
2169         /* in case of MEM_TO_MEM we have a maximum of two TRs */
2170         if (uc->config.dir == DMA_MEM_TO_MEM) {
2171             uc->config.hdesc_size = cppi5_trdesc_calc_size(
2172                     sizeof(struct cppi5_tr_type15_t), 2);
2173             uc->config.pkt_mode = false;
2174         }
2175     }
2176 
2177     if (uc->use_dma_pool) {
2178         uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2179                          uc->config.hdesc_size,
2180                          ud->desc_align,
2181                          0);
2182         if (!uc->hdesc_pool) {
2183             dev_err(ud->ddev.dev,
2184                 "Descriptor pool allocation failed\n");
2185             uc->use_dma_pool = false;
2186             ret = -ENOMEM;
2187             goto err_cleanup;
2188         }
2189     }
2190 
2191     /*
2192      * Make sure that the completion is in a known state:
2193      * No teardown, the channel is idle
2194      */
2195     reinit_completion(&uc->teardown_completed);
2196     complete_all(&uc->teardown_completed);
2197     uc->state = UDMA_CHAN_IS_IDLE;
2198 
2199     switch (uc->config.dir) {
2200     case DMA_MEM_TO_MEM:
2201         /* Non-synchronized - mem-to-mem type of transfer */
2202         dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2203             uc->id);
2204 
2205         ret = udma_get_chan_pair(uc);
2206         if (ret)
2207             goto err_cleanup;
2208 
2209         ret = udma_alloc_tx_resources(uc);
2210         if (ret) {
2211             udma_put_rchan(uc);
2212             goto err_cleanup;
2213         }
2214 
2215         ret = udma_alloc_rx_resources(uc);
2216         if (ret) {
2217             udma_free_tx_resources(uc);
2218             goto err_cleanup;
2219         }
2220 
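             /*
              * PSI-L source threads map to the tchan endpoint directly;
              * destination thread IDs carry the K3_PSIL_DST_THREAD_ID_OFFSET
              * marker bit.
              */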
2221         uc->config.src_thread = ud->psil_base + uc->tchan->id;
2222         uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2223                     K3_PSIL_DST_THREAD_ID_OFFSET;
2224 
2225         irq_ring = uc->tchan->tc_ring;
2226         irq_udma_idx = uc->tchan->id;
2227 
2228         ret = udma_tisci_m2m_channel_config(uc);
2229         break;
2230     case DMA_MEM_TO_DEV:
2231         /* Slave transfer synchronized - mem to dev (TX) transfer */
2232         dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2233             uc->id);
2234 
2235         ret = udma_alloc_tx_resources(uc);
2236         if (ret)
2237             goto err_cleanup;
2238 
2239         uc->config.src_thread = ud->psil_base + uc->tchan->id;
2240         uc->config.dst_thread = uc->config.remote_thread_id;
2241         uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2242 
2243         irq_ring = uc->tchan->tc_ring;
2244         irq_udma_idx = uc->tchan->id;
2245 
2246         ret = udma_tisci_tx_channel_config(uc);
2247         break;
2248     case DMA_DEV_TO_MEM:
2249         /* Slave transfer synchronized - dev to mem (RX) transfer */
2250         dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2251             uc->id);
2252 
2253         ret = udma_alloc_rx_resources(uc);
2254         if (ret)
2255             goto err_cleanup;
2256 
2257         uc->config.src_thread = uc->config.remote_thread_id;
2258         uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2259                     K3_PSIL_DST_THREAD_ID_OFFSET;
2260 
2261         irq_ring = uc->rflow->r_ring;
2262         irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2263 
2264         ret = udma_tisci_rx_channel_config(uc);
2265         break;
2266     default:
2267         /* Cannot happen */
2268         dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2269             __func__, uc->id, uc->config.dir);
2270         ret = -EINVAL;
2271         goto err_cleanup;
2272 
2273     }
2274 
2275     /* check if the channel configuration was successful */
2276     if (ret)
2277         goto err_res_free;
2278 
2279     if (udma_is_chan_running(uc)) {
2280         dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2281         udma_reset_chan(uc, false);
2282         if (udma_is_chan_running(uc)) {
2283             dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2284             ret = -EBUSY;
2285             goto err_res_free;
2286         }
2287     }
2288 
2289     /* PSI-L pairing */
2290     ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2291     if (ret) {
2292         dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2293             uc->config.src_thread, uc->config.dst_thread);
2294         goto err_res_free;
2295     }
2296 
2297     uc->psil_paired = true;
2298 
2299     uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2300     if (uc->irq_num_ring <= 0) {
2301         dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2302             k3_ringacc_get_ring_id(irq_ring));
2303         ret = -EINVAL;
2304         goto err_psi_free;
2305     }
2306 
2307     ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2308               IRQF_TRIGGER_HIGH, uc->name, uc);
2309     if (ret) {
2310         dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2311         goto err_irq_free;
2312     }
2313 
2314     /* Event from UDMA (TR events) only needed for slave TR mode channels */
2315     if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2316         uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2317         if (uc->irq_num_udma <= 0) {
2318             dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2319                 irq_udma_idx);
2320             free_irq(uc->irq_num_ring, uc);
2321             ret = -EINVAL;
2322             goto err_irq_free;
2323         }
2324 
2325         ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2326                   uc->name, uc);
2327         if (ret) {
2328             dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2329                 uc->id);
2330             free_irq(uc->irq_num_ring, uc);
2331             goto err_irq_free;
2332         }
2333     } else {
2334         uc->irq_num_udma = 0;
2335     }
2336 
2337     udma_reset_rings(uc);
2338 
2339     return 0;
2340 
2341 err_irq_free:
2342     uc->irq_num_ring = 0;
2343     uc->irq_num_udma = 0;
2344 err_psi_free:
2345     navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2346     uc->psil_paired = false;
2347 err_res_free:
2348     udma_free_tx_resources(uc);
2349     udma_free_rx_resources(uc);
2350 err_cleanup:
2351     udma_reset_uchan(uc);
2352 
2353     if (uc->use_dma_pool) {
2354         dma_pool_destroy(uc->hdesc_pool);
2355         uc->use_dma_pool = false;
2356     }
2357 
2358     return ret;
2359 }
2360 
2361 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2362 {
2363     struct udma_chan *uc = to_udma_chan(chan);
2364     struct udma_dev *ud = to_udma_dev(chan->device);
2365     const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2366     u32 irq_udma_idx, irq_ring_idx;
2367     int ret;
2368 
2369     /* Only TR mode is supported */
2370     uc->config.pkt_mode = false;
2371 
2372     /*
2373      * Make sure that the completion is in a known state:
2374      * No teardown, the channel is idle
2375      */
2376     reinit_completion(&uc->teardown_completed);
2377     complete_all(&uc->teardown_completed);
2378     uc->state = UDMA_CHAN_IS_IDLE;
2379 
2380     switch (uc->config.dir) {
2381     case DMA_MEM_TO_MEM:
2382         /* Non-synchronized - mem-to-mem type of transfer */
2383         dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2384             uc->id);
2385 
2386         ret = bcdma_alloc_bchan_resources(uc);
2387         if (ret)
2388             return ret;
2389 
2390         irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2391         irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2392 
2393         ret = bcdma_tisci_m2m_channel_config(uc);
2394         break;
2395     case DMA_MEM_TO_DEV:
2396         /* Slave transfer synchronized - mem to dev (TX) transfer */
2397         dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2398             uc->id);
2399 
2400         ret = udma_alloc_tx_resources(uc);
2401         if (ret) {
2402             uc->config.remote_thread_id = -1;
2403             return ret;
2404         }
2405 
2406         uc->config.src_thread = ud->psil_base + uc->tchan->id;
2407         uc->config.dst_thread = uc->config.remote_thread_id;
2408         uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2409 
2410         irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2411         irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2412 
2413         ret = bcdma_tisci_tx_channel_config(uc);
2414         break;
2415     case DMA_DEV_TO_MEM:
2416         /* Slave transfer synchronized - dev to mem (RX) transfer */
2417         dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2418             uc->id);
2419 
2420         ret = udma_alloc_rx_resources(uc);
2421         if (ret) {
2422             uc->config.remote_thread_id = -1;
2423             return ret;
2424         }
2425 
2426         uc->config.src_thread = uc->config.remote_thread_id;
2427         uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2428                     K3_PSIL_DST_THREAD_ID_OFFSET;
2429 
2430         irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2431         irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2432 
2433         ret = bcdma_tisci_rx_channel_config(uc);
2434         break;
2435     default:
2436         /* Cannot happen */
2437         dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2438             __func__, uc->id, uc->config.dir);
2439         return -EINVAL;
2440     }
2441 
2442     /* check if the channel configuration was successful */
2443     if (ret)
2444         goto err_res_free;
2445 
2446     if (udma_is_chan_running(uc)) {
2447         dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2448         udma_reset_chan(uc, false);
2449         if (udma_is_chan_running(uc)) {
2450             dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2451             ret = -EBUSY;
2452             goto err_res_free;
2453         }
2454     }
2455 
2456     uc->dma_dev = dmaengine_get_dma_device(chan);
2457     if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
2458         uc->config.hdesc_size = cppi5_trdesc_calc_size(
2459                     sizeof(struct cppi5_tr_type15_t), 2);
2460 
2461         uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2462                          uc->config.hdesc_size,
2463                          ud->desc_align,
2464                          0);
2465         if (!uc->hdesc_pool) {
2466             dev_err(ud->ddev.dev,
2467                 "Descriptor pool allocation failed\n");
2468             uc->use_dma_pool = false;
2469             ret = -ENOMEM;
2470             goto err_res_free;
2471         }
2472 
2473         uc->use_dma_pool = true;
2474     } else if (uc->config.dir != DMA_MEM_TO_MEM) {
2475         /* PSI-L pairing */
2476         ret = navss_psil_pair(ud, uc->config.src_thread,
2477                       uc->config.dst_thread);
2478         if (ret) {
2479             dev_err(ud->dev,
2480                 "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2481                 uc->config.src_thread, uc->config.dst_thread);
2482             goto err_res_free;
2483         }
2484 
2485         uc->psil_paired = true;
2486     }
2487 
2488     uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2489     if (uc->irq_num_ring <= 0) {
2490         dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2491             irq_ring_idx);
2492         ret = -EINVAL;
2493         goto err_psi_free;
2494     }
2495 
2496     ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2497               IRQF_TRIGGER_HIGH, uc->name, uc);
2498     if (ret) {
2499         dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2500         goto err_irq_free;
2501     }
2502 
2503     /* Event from BCDMA (TR events) only needed for slave channels */
2504     if (is_slave_direction(uc->config.dir)) {
2505         uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2506         if (uc->irq_num_udma <= 0) {
2507             dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2508                 irq_udma_idx);
2509             free_irq(uc->irq_num_ring, uc);
2510             ret = -EINVAL;
2511             goto err_irq_free;
2512         }
2513 
2514         ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2515                   uc->name, uc);
2516         if (ret) {
2517             dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2518                 uc->id);
2519             free_irq(uc->irq_num_ring, uc);
2520             goto err_irq_free;
2521         }
2522     } else {
2523         uc->irq_num_udma = 0;
2524     }
2525 
2526     udma_reset_rings(uc);
2527 
2528     INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2529                   udma_check_tx_completion);
2530     return 0;
2531 
2532 err_irq_free:
2533     uc->irq_num_ring = 0;
2534     uc->irq_num_udma = 0;
2535 err_psi_free:
2536     if (uc->psil_paired)
2537         navss_psil_unpair(ud, uc->config.src_thread,
2538                   uc->config.dst_thread);
2539     uc->psil_paired = false;
2540 err_res_free:
2541     bcdma_free_bchan_resources(uc);
2542     udma_free_tx_resources(uc);
2543     udma_free_rx_resources(uc);
2544 
2545     udma_reset_uchan(uc);
2546 
2547     if (uc->use_dma_pool) {
2548         dma_pool_destroy(uc->hdesc_pool);
2549         uc->use_dma_pool = false;
2550     }
2551 
2552     return ret;
2553 }
2554 
2555 static int bcdma_router_config(struct dma_chan *chan)
2556 {
2557     struct k3_event_route_data *router_data = chan->route_data;
2558     struct udma_chan *uc = to_udma_chan(chan);
2559     u32 trigger_event;
2560 
2561     if (!uc->bchan)
2562         return -EINVAL;
2563 
2564     if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2565         return -EINVAL;
2566 
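         /*
          * Each bchan exposes two global trigger events, hence the event
          * index is bchan id * 2 plus the zero-based trigger type.
          */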
2567     trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2568     trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2569 
2570     return router_data->set_event(router_data->priv, trigger_event);
2571 }
2572 
2573 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2574 {
2575     struct udma_chan *uc = to_udma_chan(chan);
2576     struct udma_dev *ud = to_udma_dev(chan->device);
2577     const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2578     u32 irq_ring_idx;
2579     int ret;
2580 
2581     /*
2582      * Make sure that the completion is in a known state:
2583      * No teardown, the channel is idle
2584      */
2585     reinit_completion(&uc->teardown_completed);
2586     complete_all(&uc->teardown_completed);
2587     uc->state = UDMA_CHAN_IS_IDLE;
2588 
2589     switch (uc->config.dir) {
2590     case DMA_MEM_TO_DEV:
2591         /* Slave transfer synchronized - mem to dev (TX) transfer */
2592         dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2593             uc->id);
2594 
2595         ret = udma_alloc_tx_resources(uc);
2596         if (ret) {
2597             uc->config.remote_thread_id = -1;
2598             return ret;
2599         }
2600 
2601         uc->config.src_thread = ud->psil_base + uc->tchan->id;
2602         uc->config.dst_thread = uc->config.remote_thread_id;
2603         uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2604 
2605         irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2606 
2607         ret = pktdma_tisci_tx_channel_config(uc);
2608         break;
2609     case DMA_DEV_TO_MEM:
2610         /* Slave transfer synchronized - dev to mem (RX) transfer */
2611         dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2612             uc->id);
2613 
2614         ret = udma_alloc_rx_resources(uc);
2615         if (ret) {
2616             uc->config.remote_thread_id = -1;
2617             return ret;
2618         }
2619 
2620         uc->config.src_thread = uc->config.remote_thread_id;
2621         uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2622                     K3_PSIL_DST_THREAD_ID_OFFSET;
2623 
2624         irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2625 
2626         ret = pktdma_tisci_rx_channel_config(uc);
2627         break;
2628     default:
2629         /* Cannot happen */
2630         dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2631             __func__, uc->id, uc->config.dir);
2632         return -EINVAL;
2633     }
2634 
2635     /* check if the channel configuration was successful */
2636     if (ret)
2637         goto err_res_free;
2638 
2639     if (udma_is_chan_running(uc)) {
2640         dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2641         udma_reset_chan(uc, false);
2642         if (udma_is_chan_running(uc)) {
2643             dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2644             ret = -EBUSY;
2645             goto err_res_free;
2646         }
2647     }
2648 
2649     uc->dma_dev = dmaengine_get_dma_device(chan);
2650     uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2651                      uc->config.hdesc_size, ud->desc_align,
2652                      0);
2653     if (!uc->hdesc_pool) {
2654         dev_err(ud->ddev.dev,
2655             "Descriptor pool allocation failed\n");
2656         uc->use_dma_pool = false;
2657         ret = -ENOMEM;
2658         goto err_res_free;
2659     }
2660 
2661     uc->use_dma_pool = true;
2662 
2663     /* PSI-L pairing */
2664     ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2665     if (ret) {
2666         dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2667             uc->config.src_thread, uc->config.dst_thread);
2668         goto err_res_free;
2669     }
2670 
2671     uc->psil_paired = true;
2672 
2673     uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2674     if (uc->irq_num_ring <= 0) {
2675         dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2676             irq_ring_idx);
2677         ret = -EINVAL;
2678         goto err_psi_free;
2679     }
2680 
2681     ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2682               IRQF_TRIGGER_HIGH, uc->name, uc);
2683     if (ret) {
2684         dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2685         goto err_irq_free;
2686     }
2687 
2688     uc->irq_num_udma = 0;
2689 
2690     udma_reset_rings(uc);
2691 
2692     INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2693                   udma_check_tx_completion);
2694 
2695     if (uc->tchan)
2696         dev_dbg(ud->dev,
2697             "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2698             uc->id, uc->tchan->id, uc->tchan->tflow_id,
2699             uc->config.remote_thread_id);
2700     else if (uc->rchan)
2701         dev_dbg(ud->dev,
2702             "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2703             uc->id, uc->rchan->id, uc->rflow->id,
2704             uc->config.remote_thread_id);
2705     return 0;
2706 
2707 err_irq_free:
2708     uc->irq_num_ring = 0;
2709 err_psi_free:
2710     navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2711     uc->psil_paired = false;
2712 err_res_free:
2713     udma_free_tx_resources(uc);
2714     udma_free_rx_resources(uc);
2715 
2716     udma_reset_uchan(uc);
2717 
2718     dma_pool_destroy(uc->hdesc_pool);
2719     uc->use_dma_pool = false;
2720 
2721     return ret;
2722 }
2723 
2724 static int udma_slave_config(struct dma_chan *chan,
2725                  struct dma_slave_config *cfg)
2726 {
2727     struct udma_chan *uc = to_udma_chan(chan);
2728 
2729     memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2730 
2731     return 0;
2732 }
2733 
2734 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2735                         size_t tr_size, int tr_count,
2736                         enum dma_transfer_direction dir)
2737 {
2738     struct udma_hwdesc *hwdesc;
2739     struct cppi5_desc_hdr_t *tr_desc;
2740     struct udma_desc *d;
2741     u32 reload_count = 0;
2742     u32 ring_id;
2743 
2744     switch (tr_size) {
2745     case 16:
2746     case 32:
2747     case 64:
2748     case 128:
2749         break;
2750     default:
2751         dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2752         return NULL;
2753     }
2754 
2755     /* We have only one descriptor containing multiple TRs */
2756     d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2757     if (!d)
2758         return NULL;
2759 
2760     d->sglen = tr_count;
2761 
2762     d->hwdesc_count = 1;
2763     hwdesc = &d->hwdesc[0];
2764 
2765     /* Allocate memory for DMA ring descriptor */
2766     if (uc->use_dma_pool) {
2767         hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2768         hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2769                         GFP_NOWAIT,
2770                         &hwdesc->cppi5_desc_paddr);
2771     } else {
2772         hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2773                                  tr_count);
2774         hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2775                         uc->ud->desc_align);
2776         hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2777                         hwdesc->cppi5_desc_size,
2778                         &hwdesc->cppi5_desc_paddr,
2779                         GFP_NOWAIT);
2780     }
2781 
2782     if (!hwdesc->cppi5_desc_vaddr) {
2783         kfree(d);
2784         return NULL;
2785     }
2786 
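         /*
          * Descriptor memory layout: one tr_size block for the CPPI5
          * descriptor header, followed by tr_count TR request records and
          * tr_count TR response records.
          */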
2787     /* Start of the TR req records */
2788     hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2789     /* Start address of the TR response array */
2790     hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2791 
2792     tr_desc = hwdesc->cppi5_desc_vaddr;
2793 
2794     if (uc->cyclic)
2795         reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2796 
2797     if (dir == DMA_DEV_TO_MEM)
2798         ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2799     else
2800         ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2801 
2802     cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2803     cppi5_desc_set_pktids(tr_desc, uc->id,
2804                   CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2805     cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2806 
2807     return d;
2808 }
2809 
2810 /**
2811  * udma_get_tr_counters - calculate TR counters for a given length
2812  * @len: Length of the transfer
2813  * @align_to: Preferred alignment
2814  * @tr0_cnt0: First TR icnt0
2815  * @tr0_cnt1: First TR icnt1
2816  * @tr1_cnt0: Second (if used) TR icnt0
2817  *
2818  * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2819  * For len >= SZ_64K two TRs are used in a simple way:
2820  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2821  * Second TR: the remaining length (tr1_cnt0)
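      *
      * Illustrative example: len = 204800 with align_to = 3 gives
      * tr0_cnt0 = SZ_64K - 8 = 65528, tr0_cnt1 = 204800 / 65528 = 3 and
      * tr1_cnt0 = 204800 % 65528 = 8216, so two TRs are used.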
2822  *
2823  * Returns the number of TRs the length needs (1 or 2)
2824  * -EINVAL if the length cannot be supported
2825  */
2826 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2827                 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2828 {
2829     if (len < SZ_64K) {
2830         *tr0_cnt0 = len;
2831         *tr0_cnt1 = 1;
2832 
2833         return 1;
2834     }
2835 
2836     if (align_to > 3)
2837         align_to = 3;
2838 
2839 realign:
2840     *tr0_cnt0 = SZ_64K - BIT(align_to);
2841     if (len / *tr0_cnt0 >= SZ_64K) {
2842         if (align_to) {
2843             align_to--;
2844             goto realign;
2845         }
2846         return -EINVAL;
2847     }
2848 
2849     *tr0_cnt1 = len / *tr0_cnt0;
2850     *tr1_cnt0 = len % *tr0_cnt0;
2851 
2852     return 2;
2853 }
2854 
2855 static struct udma_desc *
2856 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2857               unsigned int sglen, enum dma_transfer_direction dir,
2858               unsigned long tx_flags, void *context)
2859 {
2860     struct scatterlist *sgent;
2861     struct udma_desc *d;
2862     struct cppi5_tr_type1_t *tr_req = NULL;
2863     u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2864     unsigned int i;
2865     size_t tr_size;
2866     int num_tr = 0;
2867     int tr_idx = 0;
2868     u64 asel;
2869 
2870     /* estimate the number of TRs we will need */
2871     for_each_sg(sgl, sgent, sglen, i) {
2872         if (sg_dma_len(sgent) < SZ_64K)
2873             num_tr++;
2874         else
2875             num_tr += 2;
2876     }
2877 
2878     /* Now allocate and setup the descriptor. */
2879     tr_size = sizeof(struct cppi5_tr_type1_t);
2880     d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2881     if (!d)
2882         return NULL;
2883 
2884     d->sglen = sglen;
2885 
2886     if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2887         asel = 0;
2888     else
2889         asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2890 
2891     tr_req = d->hwdesc[0].tr_req_base;
2892     for_each_sg(sgl, sgent, sglen, i) {
2893         dma_addr_t sg_addr = sg_dma_address(sgent);
2894 
2895         num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2896                           &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2897         if (num_tr < 0) {
2898             dev_err(uc->ud->dev, "size %u is not supported\n",
2899                 sg_dma_len(sgent));
2900             udma_free_hwdesc(uc, d);
2901             kfree(d);
2902             return NULL;
2903         }
2904 
2905         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2906                   false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2907         cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2908 
2909         sg_addr |= asel;
2910         tr_req[tr_idx].addr = sg_addr;
2911         tr_req[tr_idx].icnt0 = tr0_cnt0;
2912         tr_req[tr_idx].icnt1 = tr0_cnt1;
2913         tr_req[tr_idx].dim1 = tr0_cnt0;
2914         tr_idx++;
2915 
2916         if (num_tr == 2) {
2917             cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2918                       false, false,
2919                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2920             cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2921                      CPPI5_TR_CSF_SUPR_EVT);
2922 
2923             tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2924             tr_req[tr_idx].icnt0 = tr1_cnt0;
2925             tr_req[tr_idx].icnt1 = 1;
2926             tr_req[tr_idx].dim1 = tr1_cnt0;
2927             tr_idx++;
2928         }
2929 
2930         d->residue += sg_dma_len(sgent);
2931     }
2932 
2933     cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2934              CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2935 
2936     return d;
2937 }
2938 
2939 static struct udma_desc *
2940 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2941                 unsigned int sglen,
2942                 enum dma_transfer_direction dir,
2943                 unsigned long tx_flags, void *context)
2944 {
2945     struct scatterlist *sgent;
2946     struct cppi5_tr_type15_t *tr_req = NULL;
2947     enum dma_slave_buswidth dev_width;
2948     u16 tr_cnt0, tr_cnt1;
2949     dma_addr_t dev_addr;
2950     struct udma_desc *d;
2951     unsigned int i;
2952     size_t tr_size, sg_len;
2953     int num_tr = 0;
2954     int tr_idx = 0;
2955     u32 burst, trigger_size, port_window;
2956     u64 asel;
2957 
2958     if (dir == DMA_DEV_TO_MEM) {
2959         dev_addr = uc->cfg.src_addr;
2960         dev_width = uc->cfg.src_addr_width;
2961         burst = uc->cfg.src_maxburst;
2962         port_window = uc->cfg.src_port_window_size;
2963     } else if (dir == DMA_MEM_TO_DEV) {
2964         dev_addr = uc->cfg.dst_addr;
2965         dev_width = uc->cfg.dst_addr_width;
2966         burst = uc->cfg.dst_maxburst;
2967         port_window = uc->cfg.dst_port_window_size;
2968     } else {
2969         dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2970         return NULL;
2971     }
2972 
2973     if (!burst)
2974         burst = 1;
2975 
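         /*
          * One trigger event moves an icnt0 x icnt1 block of trigger_size
          * bytes: a full port window when one is configured, otherwise one
          * burst of dev_width sized elements.
          */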
2976     if (port_window) {
2977         if (port_window != burst) {
2978             dev_err(uc->ud->dev,
2979                 "The burst must be equal to port_window\n");
2980             return NULL;
2981         }
2982 
2983         tr_cnt0 = dev_width * port_window;
2984         tr_cnt1 = 1;
2985     } else {
2986         tr_cnt0 = dev_width;
2987         tr_cnt1 = burst;
2988     }
2989     trigger_size = tr_cnt0 * tr_cnt1;
2990 
2991     /* estimate the number of TRs we will need */
2992     for_each_sg(sgl, sgent, sglen, i) {
2993         sg_len = sg_dma_len(sgent);
2994 
2995         if (sg_len % trigger_size) {
2996             dev_err(uc->ud->dev,
2997                 "SG entry not aligned to the trigger size (%zu for %u)\n", sg_len,
2998                 trigger_size);
2999             return NULL;
3000         }
3001 
3002         if (sg_len / trigger_size < SZ_64K)
3003             num_tr++;
3004         else
3005             num_tr += 2;
3006     }
3007 
3008     /* Now allocate and setup the descriptor. */
3009     tr_size = sizeof(struct cppi5_tr_type15_t);
3010     d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3011     if (!d)
3012         return NULL;
3013 
3014     d->sglen = sglen;
3015 
3016     if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3017         asel = 0;
3018     } else {
3019         asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3020         dev_addr |= asel;
3021     }
3022 
3023     tr_req = d->hwdesc[0].tr_req_base;
3024     for_each_sg(sgl, sgent, sglen, i) {
3025         u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3026         dma_addr_t sg_addr = sg_dma_address(sgent);
3027 
3028         sg_len = sg_dma_len(sgent);
3029         num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3030                           &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3031         if (num_tr < 0) {
3032             dev_err(uc->ud->dev, "size %zu is not supported\n",
3033                 sg_len);
3034             udma_free_hwdesc(uc, d);
3035             kfree(d);
3036             return NULL;
3037         }
3038 
3039         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3040                   true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3041         cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
3042         cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3043                      uc->config.tr_trigger_type,
3044                      CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3045 
3046         sg_addr |= asel;
3047         if (dir == DMA_DEV_TO_MEM) {
3048             tr_req[tr_idx].addr = dev_addr;
3049             tr_req[tr_idx].icnt0 = tr_cnt0;
3050             tr_req[tr_idx].icnt1 = tr_cnt1;
3051             tr_req[tr_idx].icnt2 = tr0_cnt2;
3052             tr_req[tr_idx].icnt3 = tr0_cnt3;
3053             tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3054 
3055             tr_req[tr_idx].daddr = sg_addr;
3056             tr_req[tr_idx].dicnt0 = tr_cnt0;
3057             tr_req[tr_idx].dicnt1 = tr_cnt1;
3058             tr_req[tr_idx].dicnt2 = tr0_cnt2;
3059             tr_req[tr_idx].dicnt3 = tr0_cnt3;
3060             tr_req[tr_idx].ddim1 = tr_cnt0;
3061             tr_req[tr_idx].ddim2 = trigger_size;
3062             tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3063         } else {
3064             tr_req[tr_idx].addr = sg_addr;
3065             tr_req[tr_idx].icnt0 = tr_cnt0;
3066             tr_req[tr_idx].icnt1 = tr_cnt1;
3067             tr_req[tr_idx].icnt2 = tr0_cnt2;
3068             tr_req[tr_idx].icnt3 = tr0_cnt3;
3069             tr_req[tr_idx].dim1 = tr_cnt0;
3070             tr_req[tr_idx].dim2 = trigger_size;
3071             tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3072 
3073             tr_req[tr_idx].daddr = dev_addr;
3074             tr_req[tr_idx].dicnt0 = tr_cnt0;
3075             tr_req[tr_idx].dicnt1 = tr_cnt1;
3076             tr_req[tr_idx].dicnt2 = tr0_cnt2;
3077             tr_req[tr_idx].dicnt3 = tr0_cnt3;
3078             tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3079         }
3080 
3081         tr_idx++;
3082 
3083         if (num_tr == 2) {
3084             cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3085                       false, true,
3086                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3087             cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3088                      CPPI5_TR_CSF_SUPR_EVT);
3089             cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3090                          uc->config.tr_trigger_type,
3091                          CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3092                          0, 0);
3093 
3094             sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3095             if (dir == DMA_DEV_TO_MEM) {
3096                 tr_req[tr_idx].addr = dev_addr;
3097                 tr_req[tr_idx].icnt0 = tr_cnt0;
3098                 tr_req[tr_idx].icnt1 = tr_cnt1;
3099                 tr_req[tr_idx].icnt2 = tr1_cnt2;
3100                 tr_req[tr_idx].icnt3 = 1;
3101                 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3102 
3103                 tr_req[tr_idx].daddr = sg_addr;
3104                 tr_req[tr_idx].dicnt0 = tr_cnt0;
3105                 tr_req[tr_idx].dicnt1 = tr_cnt1;
3106                 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3107                 tr_req[tr_idx].dicnt3 = 1;
3108                 tr_req[tr_idx].ddim1 = tr_cnt0;
3109                 tr_req[tr_idx].ddim2 = trigger_size;
3110             } else {
3111                 tr_req[tr_idx].addr = sg_addr;
3112                 tr_req[tr_idx].icnt0 = tr_cnt0;
3113                 tr_req[tr_idx].icnt1 = tr_cnt1;
3114                 tr_req[tr_idx].icnt2 = tr1_cnt2;
3115                 tr_req[tr_idx].icnt3 = 1;
3116                 tr_req[tr_idx].dim1 = tr_cnt0;
3117                 tr_req[tr_idx].dim2 = trigger_size;
3118 
3119                 tr_req[tr_idx].daddr = dev_addr;
3120                 tr_req[tr_idx].dicnt0 = tr_cnt0;
3121                 tr_req[tr_idx].dicnt1 = tr_cnt1;
3122                 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3123                 tr_req[tr_idx].dicnt3 = 1;
3124                 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3125             }
3126             tr_idx++;
3127         }
3128 
3129         d->residue += sg_len;
3130     }
3131 
3132     cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3133              CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3134 
3135     return d;
3136 }
3137 
3138 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3139                    enum dma_slave_buswidth dev_width,
3140                    u16 elcnt)
3141 {
3142     if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3143         return 0;
3144 
3145     /* Bus width translates to the element size (ES) */
3146     switch (dev_width) {
3147     case DMA_SLAVE_BUSWIDTH_1_BYTE:
3148         d->static_tr.elsize = 0;
3149         break;
3150     case DMA_SLAVE_BUSWIDTH_2_BYTES:
3151         d->static_tr.elsize = 1;
3152         break;
3153     case DMA_SLAVE_BUSWIDTH_3_BYTES:
3154         d->static_tr.elsize = 2;
3155         break;
3156     case DMA_SLAVE_BUSWIDTH_4_BYTES:
3157         d->static_tr.elsize = 3;
3158         break;
3159     case DMA_SLAVE_BUSWIDTH_8_BYTES:
3160         d->static_tr.elsize = 4;
3161         break;
3162     default: /* not reached */
3163         return -EINVAL;
3164     }
3165 
3166     d->static_tr.elcnt = elcnt;
3167 
3168     /*
3169      * PDMA must close the packet when the channel is in packet mode.
3170      * For TR mode, when the channel is not cyclic, we also need PDMA to
3171      * close the packet, otherwise the transfer will stall because PDMA
3172      * holds on to the data it has received from the peripheral.
3173      */
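         /*
          * Illustrative example: a non-cyclic transfer of 4096 bytes with
          * a 4-byte bus width and elcnt = 8 gives
          * bstcnt = 4096 / (4 * 8) = 128 bursts.
          */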
3174     if (uc->config.pkt_mode || !uc->cyclic) {
3175         unsigned int div = dev_width * elcnt;
3176 
3177         if (uc->cyclic)
3178             d->static_tr.bstcnt = d->residue / d->sglen / div;
3179         else
3180             d->static_tr.bstcnt = d->residue / div;
3181 
3182         if (uc->config.dir == DMA_DEV_TO_MEM &&
3183             d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3184             return -EINVAL;
3185     } else {
3186         d->static_tr.bstcnt = 0;
3187     }
3188 
3189     return 0;
3190 }
3191 
3192 static struct udma_desc *
3193 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3194                unsigned int sglen, enum dma_transfer_direction dir,
3195                unsigned long tx_flags, void *context)
3196 {
3197     struct scatterlist *sgent;
3198     struct cppi5_host_desc_t *h_desc = NULL;
3199     struct udma_desc *d;
3200     u32 ring_id;
3201     unsigned int i;
3202     u64 asel;
3203 
3204     d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3205     if (!d)
3206         return NULL;
3207 
3208     d->sglen = sglen;
3209     d->hwdesc_count = sglen;
3210 
3211     if (dir == DMA_DEV_TO_MEM)
3212         ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3213     else
3214         ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3215 
3216     if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3217         asel = 0;
3218     else
3219         asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3220 
3221     for_each_sg(sgl, sgent, sglen, i) {
3222         struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3223         dma_addr_t sg_addr = sg_dma_address(sgent);
3224         struct cppi5_host_desc_t *desc;
3225         size_t sg_len = sg_dma_len(sgent);
3226 
3227         hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3228                         GFP_NOWAIT,
3229                         &hwdesc->cppi5_desc_paddr);
3230         if (!hwdesc->cppi5_desc_vaddr) {
3231             dev_err(uc->ud->dev,
3232                 "descriptor%d allocation failed\n", i);
3233 
3234             udma_free_hwdesc(uc, d);
3235             kfree(d);
3236             return NULL;
3237         }
3238 
3239         d->residue += sg_len;
3240         hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3241         desc = hwdesc->cppi5_desc_vaddr;
3242 
3243         if (i == 0) {
3244             cppi5_hdesc_init(desc, 0, 0);
3245             /* Flow and Packet ID */
3246             cppi5_desc_set_pktids(&desc->hdr, uc->id,
3247                           CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3248             cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3249         } else {
3250             cppi5_hdesc_reset_hbdesc(desc);
3251             cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3252         }
3253 
3254         /* attach the sg buffer to the descriptor */
3255         sg_addr |= asel;
3256         cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3257 
3258         /* Attach link as host buffer descriptor */
3259         if (h_desc)
3260             cppi5_hdesc_link_hbdesc(h_desc,
3261                         hwdesc->cppi5_desc_paddr | asel);
3262 
3263         if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3264             dir == DMA_MEM_TO_DEV)
3265             h_desc = desc;
3266     }
3267 
3268     if (d->residue >= SZ_4M) {
3269         dev_err(uc->ud->dev,
3270             "%s: Transfer size %u is over the supported 4M range\n",
3271             __func__, d->residue);
3272         udma_free_hwdesc(uc, d);
3273         kfree(d);
3274         return NULL;
3275     }
3276 
3277     h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3278     cppi5_hdesc_set_pktlen(h_desc, d->residue);
3279 
3280     return d;
3281 }
3282 
3283 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3284                 void *data, size_t len)
3285 {
3286     struct udma_desc *d = to_udma_desc(desc);
3287     struct udma_chan *uc = to_udma_chan(desc->chan);
3288     struct cppi5_host_desc_t *h_desc;
3289     u32 psd_size = len;
3290     u32 flags = 0;
3291 
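         /*
          * Client metadata is the optional 16-byte EPIB followed by
          * protocol specific data words. For TX it is copied into the
          * first host descriptor here; for RX the hardware fills it in on
          * completion.
          */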
3292     if (!uc->config.pkt_mode || !uc->config.metadata_size)
3293         return -ENOTSUPP;
3294 
3295     if (!data || len > uc->config.metadata_size)
3296         return -EINVAL;
3297 
3298     if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3299         return -EINVAL;
3300 
3301     h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3302     if (d->dir == DMA_MEM_TO_DEV)
3303         memcpy(h_desc->epib, data, len);
3304 
3305     if (uc->config.needs_epib)
3306         psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3307 
3308     d->metadata = data;
3309     d->metadata_size = len;
3310     if (uc->config.needs_epib)
3311         flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3312 
3313     cppi5_hdesc_update_flags(h_desc, flags);
3314     cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3315 
3316     return 0;
3317 }
3318 
3319 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3320                    size_t *payload_len, size_t *max_len)
3321 {
3322     struct udma_desc *d = to_udma_desc(desc);
3323     struct udma_chan *uc = to_udma_chan(desc->chan);
3324     struct cppi5_host_desc_t *h_desc;
3325 
3326     if (!uc->config.pkt_mode || !uc->config.metadata_size)
3327         return ERR_PTR(-ENOTSUPP);
3328 
3329     h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3330 
3331     *max_len = uc->config.metadata_size;
3332 
3333     *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3334                CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3335     *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3336 
3337     return h_desc->epib;
3338 }
3339 
3340 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3341                  size_t payload_len)
3342 {
3343     struct udma_desc *d = to_udma_desc(desc);
3344     struct udma_chan *uc = to_udma_chan(desc->chan);
3345     struct cppi5_host_desc_t *h_desc;
3346     u32 psd_size = payload_len;
3347     u32 flags = 0;
3348 
3349     if (!uc->config.pkt_mode || !uc->config.metadata_size)
3350         return -ENOTSUPP;
3351 
3352     if (payload_len > uc->config.metadata_size)
3353         return -EINVAL;
3354 
3355     if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3356         return -EINVAL;
3357 
3358     h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3359 
3360     if (uc->config.needs_epib) {
3361         psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3362         flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3363     }
3364 
3365     cppi5_hdesc_update_flags(h_desc, flags);
3366     cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3367 
3368     return 0;
3369 }
3370 
3371 static struct dma_descriptor_metadata_ops metadata_ops = {
3372     .attach = udma_attach_metadata,
3373     .get_ptr = udma_get_metadata_ptr,
3374     .set_len = udma_set_metadata_len,
3375 };
3376 
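/*
 * device_prep_slave_sg callback. Depending on the channel configuration
 * the descriptor is built in packet mode (host descriptor chain), in TR
 * mode for normal slave directions, or in triggered TR mode when the
 * channel is MEM_TO_MEM with a TR trigger type (BCDMA).
 */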
3377 static struct dma_async_tx_descriptor *
3378 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3379            unsigned int sglen, enum dma_transfer_direction dir,
3380            unsigned long tx_flags, void *context)
3381 {
3382     struct udma_chan *uc = to_udma_chan(chan);
3383     enum dma_slave_buswidth dev_width;
3384     struct udma_desc *d;
3385     u32 burst;
3386 
3387     if (dir != uc->config.dir &&
3388         (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3389         dev_err(chan->device->dev,
3390             "%s: chan%d is for %s, not supporting %s\n",
3391             __func__, uc->id,
3392             dmaengine_get_direction_text(uc->config.dir),
3393             dmaengine_get_direction_text(dir));
3394         return NULL;
3395     }
3396 
3397     if (dir == DMA_DEV_TO_MEM) {
3398         dev_width = uc->cfg.src_addr_width;
3399         burst = uc->cfg.src_maxburst;
3400     } else if (dir == DMA_MEM_TO_DEV) {
3401         dev_width = uc->cfg.dst_addr_width;
3402         burst = uc->cfg.dst_maxburst;
3403     } else {
3404         dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3405         return NULL;
3406     }
3407 
3408     if (!burst)
3409         burst = 1;
3410 
3411     if (uc->config.pkt_mode)
3412         d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3413                        context);
3414     else if (is_slave_direction(uc->config.dir))
3415         d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3416                       context);
3417     else
3418         d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3419                             tx_flags, context);
3420 
3421     if (!d)
3422         return NULL;
3423 
3424     d->dir = dir;
3425     d->desc_idx = 0;
3426     d->tr_idx = 0;
3427 
3428     /* static TR for remote PDMA */
3429     if (udma_configure_statictr(uc, d, dev_width, burst)) {
3430         dev_err(uc->ud->dev,
3431             "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3432             __func__, d->static_tr.bstcnt);
3433 
3434         udma_free_hwdesc(uc, d);
3435         kfree(d);
3436         return NULL;
3437     }
3438 
3439     if (uc->config.metadata_size)
3440         d->vd.tx.metadata_ops = &metadata_ops;
3441 
3442     return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3443 }
3444 
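/*
 * Cyclic transfer in TR mode: each period is covered by one type1 TR,
 * or by two when the period length cannot be expressed by a single TR.
 * With two TRs per period the event of the first is suppressed so that
 * at most one completion fires per period, and events are suppressed
 * entirely when DMA_PREP_INTERRUPT is not requested.
 */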
3445 static struct udma_desc *
3446 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3447             size_t buf_len, size_t period_len,
3448             enum dma_transfer_direction dir, unsigned long flags)
3449 {
3450     struct udma_desc *d;
3451     size_t tr_size, period_addr;
3452     struct cppi5_tr_type1_t *tr_req;
3453     unsigned int periods = buf_len / period_len;
3454     u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3455     unsigned int i;
3456     int num_tr;
3457 
3458     num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3459                       &tr0_cnt1, &tr1_cnt0);
3460     if (num_tr < 0) {
3461         dev_err(uc->ud->dev, "size %zu is not supported\n",
3462             period_len);
3463         return NULL;
3464     }
3465 
3466     /* Now allocate and setup the descriptor. */
3467     tr_size = sizeof(struct cppi5_tr_type1_t);
3468     d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3469     if (!d)
3470         return NULL;
3471 
3472     tr_req = d->hwdesc[0].tr_req_base;
3473     if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3474         period_addr = buf_addr;
3475     else
3476         period_addr = buf_addr |
3477             ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3478 
3479     for (i = 0; i < periods; i++) {
3480         int tr_idx = i * num_tr;
3481 
3482         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3483                   false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3484 
3485         tr_req[tr_idx].addr = period_addr;
3486         tr_req[tr_idx].icnt0 = tr0_cnt0;
3487         tr_req[tr_idx].icnt1 = tr0_cnt1;
3488         tr_req[tr_idx].dim1 = tr0_cnt0;
3489 
3490         if (num_tr == 2) {
3491             cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3492                      CPPI5_TR_CSF_SUPR_EVT);
3493             tr_idx++;
3494 
3495             cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3496                       false, false,
3497                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3498 
3499             tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3500             tr_req[tr_idx].icnt0 = tr1_cnt0;
3501             tr_req[tr_idx].icnt1 = 1;
3502             tr_req[tr_idx].dim1 = tr1_cnt0;
3503         }
3504 
3505         if (!(flags & DMA_PREP_INTERRUPT))
3506             cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3507                      CPPI5_TR_CSF_SUPR_EVT);
3508 
3509         period_addr += period_len;
3510     }
3511 
3512     return d;
3513 }
3514 
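/*
 * Cyclic transfer in packet mode: one host descriptor per period, each
 * attached to its slice of the client buffer. The period count must fit
 * in the default ring and each period must stay below the 4M packet
 * length limit.
 */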
3515 static struct udma_desc *
3516 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3517              size_t buf_len, size_t period_len,
3518              enum dma_transfer_direction dir, unsigned long flags)
3519 {
3520     struct udma_desc *d;
3521     u32 ring_id;
3522     int i;
3523     int periods = buf_len / period_len;
3524 
3525     if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3526         return NULL;
3527 
3528     if (period_len >= SZ_4M)
3529         return NULL;
3530 
3531     d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3532     if (!d)
3533         return NULL;
3534 
3535     d->hwdesc_count = periods;
3536 
3537     /* TODO: re-check this... */
3538     if (dir == DMA_DEV_TO_MEM)
3539         ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3540     else
3541         ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3542 
3543     if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3544         buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3545 
3546     for (i = 0; i < periods; i++) {
3547         struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3548         dma_addr_t period_addr = buf_addr + (period_len * i);
3549         struct cppi5_host_desc_t *h_desc;
3550 
3551         hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3552                         GFP_NOWAIT,
3553                         &hwdesc->cppi5_desc_paddr);
3554         if (!hwdesc->cppi5_desc_vaddr) {
3555             dev_err(uc->ud->dev,
3556                 "descriptor%d allocation failed\n", i);
3557 
3558             udma_free_hwdesc(uc, d);
3559             kfree(d);
3560             return NULL;
3561         }
3562 
3563         hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3564         h_desc = hwdesc->cppi5_desc_vaddr;
3565 
3566         cppi5_hdesc_init(h_desc, 0, 0);
3567         cppi5_hdesc_set_pktlen(h_desc, period_len);
3568 
3569         /* Flow and Packet ID */
3570         cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3571                       CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3572         cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3573 
3574         /* attach each period to a new descriptor */
3575         cppi5_hdesc_attach_buf(h_desc,
3576                        period_addr, period_len,
3577                        period_addr, period_len);
3578     }
3579 
3580     return d;
3581 }
3582 
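/*
 * device_prep_dma_cyclic callback: validates the direction against the
 * channel configuration, builds the descriptor in packet or TR mode and
 * programs the static TR parameters for remote PDMA endpoints.
 */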
3583 static struct dma_async_tx_descriptor *
3584 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3585              size_t period_len, enum dma_transfer_direction dir,
3586              unsigned long flags)
3587 {
3588     struct udma_chan *uc = to_udma_chan(chan);
3589     enum dma_slave_buswidth dev_width;
3590     struct udma_desc *d;
3591     u32 burst;
3592 
3593     if (dir != uc->config.dir) {
3594         dev_err(chan->device->dev,
3595             "%s: chan%d is for %s, not supporting %s\n",
3596             __func__, uc->id,
3597             dmaengine_get_direction_text(uc->config.dir),
3598             dmaengine_get_direction_text(dir));
3599         return NULL;
3600     }
3601 
3602     uc->cyclic = true;
3603 
3604     if (dir == DMA_DEV_TO_MEM) {
3605         dev_width = uc->cfg.src_addr_width;
3606         burst = uc->cfg.src_maxburst;
3607     } else if (dir == DMA_MEM_TO_DEV) {
3608         dev_width = uc->cfg.dst_addr_width;
3609         burst = uc->cfg.dst_maxburst;
3610     } else {
3611         dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3612         return NULL;
3613     }
3614 
3615     if (!burst)
3616         burst = 1;
3617 
3618     if (uc->config.pkt_mode)
3619         d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3620                          dir, flags);
3621     else
3622         d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3623                         dir, flags);
3624 
3625     if (!d)
3626         return NULL;
3627 
3628     d->sglen = buf_len / period_len;
3629 
3630     d->dir = dir;
3631     d->residue = buf_len;
3632 
3633     /* static TR for remote PDMA */
3634     if (udma_configure_statictr(uc, d, dev_width, burst)) {
3635         dev_err(uc->ud->dev,
3636             "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3637             __func__, d->static_tr.bstcnt);
3638 
3639         udma_free_hwdesc(uc, d);
3640         kfree(d);
3641         return NULL;
3642     }
3643 
3644     if (uc->config.metadata_size)
3645         d->vd.tx.metadata_ops = &metadata_ops;
3646 
3647     return vchan_tx_prep(&uc->vc, &d->vd, flags);
3648 }
3649 
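/*
 * device_prep_dma_memcpy callback using type15 TRs. udma_get_tr_counters()
 * splits the copy into at most two TRs: a bulk TR repeating a chunk that
 * fits the 16-bit icnt fields, plus a remainder TR. Per-TR events are
 * suppressed and the last TR is flagged as EOP; completion is observed
 * via the returned descriptor on the completion ring.
 */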
3650 static struct dma_async_tx_descriptor *
3651 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3652              size_t len, unsigned long tx_flags)
3653 {
3654     struct udma_chan *uc = to_udma_chan(chan);
3655     struct udma_desc *d;
3656     struct cppi5_tr_type15_t *tr_req;
3657     int num_tr;
3658     size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3659     u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3660 
3661     if (uc->config.dir != DMA_MEM_TO_MEM) {
3662         dev_err(chan->device->dev,
3663             "%s: chan%d is for %s, not supporting %s\n",
3664             __func__, uc->id,
3665             dmaengine_get_direction_text(uc->config.dir),
3666             dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3667         return NULL;
3668     }
3669 
3670     num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3671                       &tr0_cnt1, &tr1_cnt0);
3672     if (num_tr < 0) {
3673         dev_err(uc->ud->dev, "size %zu is not supported\n",
3674             len);
3675         return NULL;
3676     }
3677 
3678     d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3679     if (!d)
3680         return NULL;
3681 
3682     d->dir = DMA_MEM_TO_MEM;
3683     d->desc_idx = 0;
3684     d->tr_idx = 0;
3685     d->residue = len;
3686 
3687     if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3688         src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3689         dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3690     }
3691 
3692     tr_req = d->hwdesc[0].tr_req_base;
3693 
3694     cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3695               CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3696     cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3697 
3698     tr_req[0].addr = src;
3699     tr_req[0].icnt0 = tr0_cnt0;
3700     tr_req[0].icnt1 = tr0_cnt1;
3701     tr_req[0].icnt2 = 1;
3702     tr_req[0].icnt3 = 1;
3703     tr_req[0].dim1 = tr0_cnt0;
3704 
3705     tr_req[0].daddr = dest;
3706     tr_req[0].dicnt0 = tr0_cnt0;
3707     tr_req[0].dicnt1 = tr0_cnt1;
3708     tr_req[0].dicnt2 = 1;
3709     tr_req[0].dicnt3 = 1;
3710     tr_req[0].ddim1 = tr0_cnt0;
3711 
3712     if (num_tr == 2) {
3713         cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3714                   CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3715         cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3716 
3717         tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3718         tr_req[1].icnt0 = tr1_cnt0;
3719         tr_req[1].icnt1 = 1;
3720         tr_req[1].icnt2 = 1;
3721         tr_req[1].icnt3 = 1;
3722 
3723         tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3724         tr_req[1].dicnt0 = tr1_cnt0;
3725         tr_req[1].dicnt1 = 1;
3726         tr_req[1].dicnt2 = 1;
3727         tr_req[1].dicnt3 = 1;
3728     }
3729 
3730     cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3731              CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3732 
3733     if (uc->config.metadata_size)
3734         d->vd.tx.metadata_ops = &metadata_ops;
3735 
3736     return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3737 }
3738 
3739 static void udma_issue_pending(struct dma_chan *chan)
3740 {
3741     struct udma_chan *uc = to_udma_chan(chan);
3742     unsigned long flags;
3743 
3744     spin_lock_irqsave(&uc->vc.lock, flags);
3745 
3746     /* If we have something pending and no active descriptor, then: */
3747     if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3748         /*
3749          * Start a descriptor, unless the channel is marked as
3750          * terminating _and_ still running (i.e. its teardown has not
3751          * completed yet).
3752          */
3753         if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3754               udma_is_chan_running(uc)))
3755             udma_start(uc);
3756     }
3757 
3758     spin_unlock_irqrestore(&uc->vc.lock, flags);
3759 }
3760 
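/*
 * device_tx_status callback. The residue is estimated from the channel
 * realtime byte counters: the source count (plus the peer count for
 * PDMA endpoints, their difference being reported as in-flight bytes)
 * for MEM_TO_DEV, the destination count for DEV_TO_MEM. The counters
 * are free running, so per-descriptor progress is taken modulo the
 * descriptor's total residue. A stopped channel reports DMA_COMPLETE,
 * a paused one DMA_PAUSED.
 */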
3761 static enum dma_status udma_tx_status(struct dma_chan *chan,
3762                       dma_cookie_t cookie,
3763                       struct dma_tx_state *txstate)
3764 {
3765     struct udma_chan *uc = to_udma_chan(chan);
3766     enum dma_status ret;
3767     unsigned long flags;
3768 
3769     spin_lock_irqsave(&uc->vc.lock, flags);
3770 
3771     ret = dma_cookie_status(chan, cookie, txstate);
3772 
3773     if (!udma_is_chan_running(uc))
3774         ret = DMA_COMPLETE;
3775 
3776     if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3777         ret = DMA_PAUSED;
3778 
3779     if (ret == DMA_COMPLETE || !txstate)
3780         goto out;
3781 
3782     if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3783         u32 peer_bcnt = 0;
3784         u32 bcnt = 0;
3785         u32 residue = uc->desc->residue;
3786         u32 delay = 0;
3787 
3788         if (uc->desc->dir == DMA_MEM_TO_DEV) {
3789             bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3790 
3791             if (uc->config.ep_type != PSIL_EP_NATIVE) {
3792                 peer_bcnt = udma_tchanrt_read(uc,
3793                         UDMA_CHAN_RT_PEER_BCNT_REG);
3794 
3795                 if (bcnt > peer_bcnt)
3796                     delay = bcnt - peer_bcnt;
3797             }
3798         } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3799             bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3800 
3801             if (uc->config.ep_type != PSIL_EP_NATIVE) {
3802                 peer_bcnt = udma_rchanrt_read(uc,
3803                         UDMA_CHAN_RT_PEER_BCNT_REG);
3804 
3805                 if (peer_bcnt > bcnt)
3806                     delay = peer_bcnt - bcnt;
3807             }
3808         } else {
3809             bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3810         }
3811 
3812         bcnt -= uc->bcnt;
3813         if (bcnt && !(bcnt % uc->desc->residue))
3814             residue = 0;
3815         else
3816             residue -= bcnt % uc->desc->residue;
3817 
3818         if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3819             ret = DMA_COMPLETE;
3820             delay = 0;
3821         }
3822 
3823         dma_set_residue(txstate, residue);
3824         dma_set_in_flight_bytes(txstate, delay);
3825 
3826     } else {
3827         ret = DMA_COMPLETE;
3828     }
3829 
3830 out:
3831     spin_unlock_irqrestore(&uc->vc.lock, flags);
3832     return ret;
3833 }
3834 
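/*
 * Pause/resume: slave channels are paused at the PSI-L peer (the remote
 * thread), while MEM_TO_MEM channels use the pause bit in the channel
 * realtime control register.
 */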
3835 static int udma_pause(struct dma_chan *chan)
3836 {
3837     struct udma_chan *uc = to_udma_chan(chan);
3838 
3839     /* pause the channel */
3840     switch (uc->config.dir) {
3841     case DMA_DEV_TO_MEM:
3842         udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3843                      UDMA_PEER_RT_EN_PAUSE,
3844                      UDMA_PEER_RT_EN_PAUSE);
3845         break;
3846     case DMA_MEM_TO_DEV:
3847         udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3848                      UDMA_PEER_RT_EN_PAUSE,
3849                      UDMA_PEER_RT_EN_PAUSE);
3850         break;
3851     case DMA_MEM_TO_MEM:
3852         udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3853                      UDMA_CHAN_RT_CTL_PAUSE,
3854                      UDMA_CHAN_RT_CTL_PAUSE);
3855         break;
3856     default:
3857         return -EINVAL;
3858     }
3859 
3860     return 0;
3861 }
3862 
3863 static int udma_resume(struct dma_chan *chan)
3864 {
3865     struct udma_chan *uc = to_udma_chan(chan);
3866 
3867     /* resume the channel */
3868     switch (uc->config.dir) {
3869     case DMA_DEV_TO_MEM:
3870         udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3871                      UDMA_PEER_RT_EN_PAUSE, 0);
3872 
3873         break;
3874     case DMA_MEM_TO_DEV:
3875         udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3876                      UDMA_PEER_RT_EN_PAUSE, 0);
3877         break;
3878     case DMA_MEM_TO_MEM:
3879         udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3880                      UDMA_CHAN_RT_CTL_PAUSE, 0);
3881         break;
3882     default:
3883         return -EINVAL;
3884     }
3885 
3886     return 0;
3887 }
3888 
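/*
 * device_terminate_all callback: stop the channel if it is running,
 * park the in-flight descriptor as terminated (it is cleaned up once
 * the hardware teardown completes) and free all queued descriptors.
 */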
3889 static int udma_terminate_all(struct dma_chan *chan)
3890 {
3891     struct udma_chan *uc = to_udma_chan(chan);
3892     unsigned long flags;
3893     LIST_HEAD(head);
3894 
3895     spin_lock_irqsave(&uc->vc.lock, flags);
3896 
3897     if (udma_is_chan_running(uc))
3898         udma_stop(uc);
3899 
3900     if (uc->desc) {
3901         uc->terminated_desc = uc->desc;
3902         uc->desc = NULL;
3903         uc->terminated_desc->terminated = true;
3904         cancel_delayed_work(&uc->tx_drain.work);
3905     }
3906 
3907     uc->paused = false;
3908 
3909     vchan_get_all_descriptors(&uc->vc, &head);
3910     spin_unlock_irqrestore(&uc->vc.lock, flags);
3911     vchan_dma_desc_free_list(&uc->vc, &head);
3912 
3913     return 0;
3914 }
3915 
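/*
 * device_synchronize callback: wait (up to 1s) for a pending teardown
 * to complete, forcing a hard reset on timeout, then reset the channel
 * and its rings and flush the tx drain work.
 */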
3916 static void udma_synchronize(struct dma_chan *chan)
3917 {
3918     struct udma_chan *uc = to_udma_chan(chan);
3919     unsigned long timeout = msecs_to_jiffies(1000);
3920 
3921     vchan_synchronize(&uc->vc);
3922 
3923     if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3924         timeout = wait_for_completion_timeout(&uc->teardown_completed,
3925                               timeout);
3926         if (!timeout) {
3927             dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3928                  uc->id);
3929             udma_dump_chan_stdata(uc);
3930             udma_reset_chan(uc, true);
3931         }
3932     }
3933 
3934     udma_reset_chan(uc, false);
3935     if (udma_is_chan_running(uc))
3936         dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3937 
3938     cancel_delayed_work_sync(&uc->tx_drain.work);
3939     udma_reset_rings(uc);
3940 }
3941 
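/*
 * Runs right before a descriptor's completion callback: fetches the
 * EPIB metadata from the completed descriptor when the client uses
 * metadata, and fills in the dmaengine result; a host descriptor whose
 * packet length is shorter than the requested transfer indicates an
 * aborted transfer.
 */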
3942 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3943                    struct virt_dma_desc *vd,
3944                    struct dmaengine_result *result)
3945 {
3946     struct udma_chan *uc = to_udma_chan(&vc->chan);
3947     struct udma_desc *d;
3948 
3949     if (!vd)
3950         return;
3951 
3952     d = to_udma_desc(&vd->tx);
3953 
3954     if (d->metadata_size)
3955         udma_fetch_epib(uc, d);
3956 
3957     /* Provide residue information for the client */
3958     if (result) {
3959         void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3960 
3961         if (cppi5_desc_get_type(desc_vaddr) ==
3962             CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3963             result->residue = d->residue -
3964                       cppi5_hdesc_get_pktlen(desc_vaddr);
3965             if (result->residue)
3966                 result->result = DMA_TRANS_ABORTED;
3967             else
3968                 result->result = DMA_TRANS_NOERROR;
3969         } else {
3970             result->residue = 0;
3971             result->result = DMA_TRANS_NOERROR;
3972         }
3973     }
3974 }
3975 
3976 /*
3977  * This tasklet handles the completion of a DMA descriptor by
3978  * calling its callback and freeing it.
3979  */
3980 static void udma_vchan_complete(struct tasklet_struct *t)
3981 {
3982     struct virt_dma_chan *vc = from_tasklet(vc, t, task);
3983     struct virt_dma_desc *vd, *_vd;
3984     struct dmaengine_desc_callback cb;
3985     LIST_HEAD(head);
3986 
3987     spin_lock_irq(&vc->lock);
3988     list_splice_tail_init(&vc->desc_completed, &head);
3989     vd = vc->cyclic;
3990     if (vd) {
3991         vc->cyclic = NULL;
3992         dmaengine_desc_get_callback(&vd->tx, &cb);
3993     } else {
3994         memset(&cb, 0, sizeof(cb));
3995     }
3996     spin_unlock_irq(&vc->lock);
3997 
3998     udma_desc_pre_callback(vc, vd, NULL);
3999     dmaengine_desc_callback_invoke(&cb, NULL);
4000 
4001     list_for_each_entry_safe(vd, _vd, &head, node) {
4002         struct dmaengine_result result;
4003 
4004         dmaengine_desc_get_callback(&vd->tx, &cb);
4005 
4006         list_del(&vd->node);
4007 
4008         udma_desc_pre_callback(vc, vd, &result);
4009         dmaengine_desc_callback_invoke(&cb, &result);
4010 
4011         vchan_vdesc_fini(vd);
4012     }
4013 }
4014 
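/*
 * device_free_chan_resources callback: terminate anything still queued,
 * release the ring and UDMA interrupts, unpair the PSI-L threads and
 * free the channel, ring and descriptor pool resources.
 */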
4015 static void udma_free_chan_resources(struct dma_chan *chan)
4016 {
4017     struct udma_chan *uc = to_udma_chan(chan);
4018     struct udma_dev *ud = to_udma_dev(chan->device);
4019 
4020     udma_terminate_all(chan);
4021     if (uc->terminated_desc) {
4022         udma_reset_chan(uc, false);
4023         udma_reset_rings(uc);
4024     }
4025 
4026     cancel_delayed_work_sync(&uc->tx_drain.work);
4027 
4028     if (uc->irq_num_ring > 0) {
4029         free_irq(uc->irq_num_ring, uc);
4030 
4031         uc->irq_num_ring = 0;
4032     }
4033     if (uc->irq_num_udma > 0) {
4034         free_irq(uc->irq_num_udma, uc);
4035 
4036         uc->irq_num_udma = 0;
4037     }
4038 
4039     /* Release PSI-L pairing */
4040     if (uc->psil_paired) {
4041         navss_psil_unpair(ud, uc->config.src_thread,
4042                   uc->config.dst_thread);
4043         uc->psil_paired = false;
4044     }
4045 
4046     vchan_free_chan_resources(&uc->vc);
4047     tasklet_kill(&uc->vc.task);
4048 
4049     bcdma_free_bchan_resources(uc);
4050     udma_free_tx_resources(uc);
4051     udma_free_rx_resources(uc);
4052     udma_reset_uchan(uc);
4053 
4054     if (uc->use_dma_pool) {
4055         dma_pool_destroy(uc->hdesc_pool);
4056         uc->use_dma_pool = false;
4057     }
4058 }
4059 
4060 static struct platform_driver udma_driver;
4061 static struct platform_driver bcdma_driver;
4062 static struct platform_driver pktdma_driver;
4063 
4064 struct udma_filter_param {
4065     int remote_thread_id;
4066     u32 atype;
4067     u32 asel;
4068     u32 tr_trigger_type;
4069 };
4070 
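/*
 * Channel filter used by udma_of_xlate(): accept only channels of the
 * three K3 DMA drivers, validate the atype/asel arguments, derive the
 * direction from the remote PSI-L thread ID (destination threads carry
 * the K3_PSIL_DST_THREAD_ID_OFFSET bit) and copy the endpoint
 * configuration from the PSI-L database into the channel configuration.
 */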
4071 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4072 {
4073     struct udma_chan_config *ucc;
4074     struct psil_endpoint_config *ep_config;
4075     struct udma_filter_param *filter_param;
4076     struct udma_chan *uc;
4077     struct udma_dev *ud;
4078 
4079     if (chan->device->dev->driver != &udma_driver.driver &&
4080         chan->device->dev->driver != &bcdma_driver.driver &&
4081         chan->device->dev->driver != &pktdma_driver.driver)
4082         return false;
4083 
4084     uc = to_udma_chan(chan);
4085     ucc = &uc->config;
4086     ud = uc->ud;
4087     filter_param = param;
4088 
4089     if (filter_param->atype > 2) {
4090         dev_err(ud->dev, "Invalid channel atype: %u\n",
4091             filter_param->atype);
4092         return false;
4093     }
4094 
4095     if (filter_param->asel > 15) {
4096         dev_err(ud->dev, "Invalid channel asel: %u\n",
4097             filter_param->asel);
4098         return false;
4099     }
4100 
4101     ucc->remote_thread_id = filter_param->remote_thread_id;
4102     ucc->atype = filter_param->atype;
4103     ucc->asel = filter_param->asel;
4104     ucc->tr_trigger_type = filter_param->tr_trigger_type;
4105 
4106     if (ucc->tr_trigger_type) {
4107         ucc->dir = DMA_MEM_TO_MEM;
4108         goto triggered_bchan;
4109     } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4110         ucc->dir = DMA_MEM_TO_DEV;
4111     } else {
4112         ucc->dir = DMA_DEV_TO_MEM;
4113     }
4114 
4115     ep_config = psil_get_ep_config(ucc->remote_thread_id);
4116     if (IS_ERR(ep_config)) {
4117         dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4118             ucc->remote_thread_id);
4119         ucc->dir = DMA_MEM_TO_MEM;
4120         ucc->remote_thread_id = -1;
4121         ucc->atype = 0;
4122         ucc->asel = 0;
4123         return false;
4124     }
4125 
4126     if (ud->match_data->type == DMA_TYPE_BCDMA &&
4127         ep_config->pkt_mode) {
4128         dev_err(ud->dev,
4129             "Only TR mode is supported (psi-l thread 0x%04x)\n",
4130             ucc->remote_thread_id);
4131         ucc->dir = DMA_MEM_TO_MEM;
4132         ucc->remote_thread_id = -1;
4133         ucc->atype = 0;
4134         ucc->asel = 0;
4135         return false;
4136     }
4137 
4138     ucc->pkt_mode = ep_config->pkt_mode;
4139     ucc->channel_tpl = ep_config->channel_tpl;
4140     ucc->notdpkt = ep_config->notdpkt;
4141     ucc->ep_type = ep_config->ep_type;
4142 
4143     if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4144         ep_config->mapped_channel_id >= 0) {
4145         ucc->mapped_channel_id = ep_config->mapped_channel_id;
4146         ucc->default_flow_id = ep_config->default_flow_id;
4147     } else {
4148         ucc->mapped_channel_id = -1;
4149         ucc->default_flow_id = -1;
4150     }
4151 
4152     if (ucc->ep_type != PSIL_EP_NATIVE) {
4153         const struct udma_match_data *match_data = ud->match_data;
4154 
4155         if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4156             ucc->enable_acc32 = ep_config->pdma_acc32;
4157         if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4158             ucc->enable_burst = ep_config->pdma_burst;
4159     }
4160 
4161     ucc->needs_epib = ep_config->needs_epib;
4162     ucc->psd_size = ep_config->psd_size;
4163     ucc->metadata_size =
4164             (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4165             ucc->psd_size;
4166 
4167     if (ucc->pkt_mode)
4168         ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4169                  ucc->metadata_size, ud->desc_align);
4170 
4171     dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4172         ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4173 
4174     return true;
4175 
4176 triggered_bchan:
4177     dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4178         ucc->tr_trigger_type);
4179 
4180     return true;
4182 }
4183 
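/*
 * of_dma translation: decodes the consumer's dma-cells. BCDMA uses three
 * cells <trigger-type thread-id asel>; UDMA and PKTDMA use the remote
 * PSI-L thread ID optionally followed by atype (UDMA) or asel (PKTDMA).
 * Illustrative consumer entries (the thread IDs are peripheral specific,
 * not taken from a real board):
 *
 *	dmas = <&main_udmap 0xc400>;
 *	dmas = <&main_bcdma 1 0xc500 0>;
 */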
4184 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4185                       struct of_dma *ofdma)
4186 {
4187     struct udma_dev *ud = ofdma->of_dma_data;
4188     dma_cap_mask_t mask = ud->ddev.cap_mask;
4189     struct udma_filter_param filter_param;
4190     struct dma_chan *chan;
4191 
4192     if (ud->match_data->type == DMA_TYPE_BCDMA) {
4193         if (dma_spec->args_count != 3)
4194             return NULL;
4195 
4196         filter_param.tr_trigger_type = dma_spec->args[0];
4197         filter_param.remote_thread_id = dma_spec->args[1];
4198         filter_param.asel = dma_spec->args[2];
4199         filter_param.atype = 0;
4200     } else {
4201         if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4202             return NULL;
4203 
4204         filter_param.remote_thread_id = dma_spec->args[0];
4205         filter_param.tr_trigger_type = 0;
4206         if (dma_spec->args_count == 2) {
4207             if (ud->match_data->type == DMA_TYPE_UDMA) {
4208                 filter_param.atype = dma_spec->args[1];
4209                 filter_param.asel = 0;
4210             } else {
4211                 filter_param.atype = 0;
4212                 filter_param.asel = dma_spec->args[1];
4213             }
4214         } else {
4215             filter_param.atype = 0;
4216             filter_param.asel = 0;
4217         }
4218     }
4219 
4220     chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4221                      ofdma->of_node);
4222     if (!chan) {
4223         dev_err(ud->dev, "failed to get a channel in %s\n", __func__);
4224         return ERR_PTR(-EINVAL);
4225     }
4226 
4227     return chan;
4228 }
4229 
4230 static struct udma_match_data am654_main_data = {
4231     .type = DMA_TYPE_UDMA,
4232     .psil_base = 0x1000,
4233     .enable_memcpy_support = true,
4234     .statictr_z_mask = GENMASK(11, 0),
4235     .burst_size = {
4236         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4237         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4238         0, /* No UH Channels */
4239     },
4240 };
4241 
4242 static struct udma_match_data am654_mcu_data = {
4243     .type = DMA_TYPE_UDMA,
4244     .psil_base = 0x6000,
4245     .enable_memcpy_support = false,
4246     .statictr_z_mask = GENMASK(11, 0),
4247     .burst_size = {
4248         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4249         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4250         0, /* No UH Channels */
4251     },
4252 };
4253 
4254 static struct udma_match_data j721e_main_data = {
4255     .type = DMA_TYPE_UDMA,
4256     .psil_base = 0x1000,
4257     .enable_memcpy_support = true,
4258     .flags = UDMA_FLAGS_J7_CLASS,
4259     .statictr_z_mask = GENMASK(23, 0),
4260     .burst_size = {
4261         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4262         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4263         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4264     },
4265 };
4266 
4267 static struct udma_match_data j721e_mcu_data = {
4268     .type = DMA_TYPE_UDMA,
4269     .psil_base = 0x6000,
4270     .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4271     .flags = UDMA_FLAGS_J7_CLASS,
4272     .statictr_z_mask = GENMASK(23, 0),
4273     .burst_size = {
4274         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4275         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4276         0, /* No UH Channels */
4277     },
4278 };
4279 
4280 static struct udma_match_data am64_bcdma_data = {
4281     .type = DMA_TYPE_BCDMA,
4282     .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4283     .enable_memcpy_support = true, /* Supported via bchan */
4284     .flags = UDMA_FLAGS_J7_CLASS,
4285     .statictr_z_mask = GENMASK(23, 0),
4286     .burst_size = {
4287         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4288         0, /* No H Channels */
4289         0, /* No UH Channels */
4290     },
4291 };
4292 
4293 static struct udma_match_data am64_pktdma_data = {
4294     .type = DMA_TYPE_PKTDMA,
4295     .psil_base = 0x1000,
4296     .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4297     .flags = UDMA_FLAGS_J7_CLASS,
4298     .statictr_z_mask = GENMASK(23, 0),
4299     .burst_size = {
4300         TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4301         0, /* No H Channels */
4302         0, /* No UH Channels */
4303     },
4304 };
4305 
4306 static const struct of_device_id udma_of_match[] = {
4307     {
4308         .compatible = "ti,am654-navss-main-udmap",
4309         .data = &am654_main_data,
4310     },
4311     {
4312         .compatible = "ti,am654-navss-mcu-udmap",
4313         .data = &am654_mcu_data,
4314     }, {
4315         .compatible = "ti,j721e-navss-main-udmap",
4316         .data = &j721e_main_data,
4317     }, {
4318         .compatible = "ti,j721e-navss-mcu-udmap",
4319         .data = &j721e_mcu_data,
4320     },
4321     { /* Sentinel */ },
4322 };
4323 
4324 static const struct of_device_id bcdma_of_match[] = {
4325     {
4326         .compatible = "ti,am64-dmss-bcdma",
4327         .data = &am64_bcdma_data,
4328     },
4329     { /* Sentinel */ },
4330 };
4331 
4332 static const struct of_device_id pktdma_of_match[] = {
4333     {
4334         .compatible = "ti,am64-dmss-pktdma",
4335         .data = &am64_pktdma_data,
4336     },
4337     { /* Sentinel */ },
4338 };
4339 
4340 static struct udma_soc_data am654_soc_data = {
4341     .oes = {
4342         .udma_rchan = 0x200,
4343     },
4344 };
4345 
4346 static struct udma_soc_data j721e_soc_data = {
4347     .oes = {
4348         .udma_rchan = 0x400,
4349     },
4350 };
4351 
4352 static struct udma_soc_data j7200_soc_data = {
4353     .oes = {
4354         .udma_rchan = 0x80,
4355     },
4356 };
4357 
4358 static struct udma_soc_data am64_soc_data = {
4359     .oes = {
4360         .bcdma_bchan_data = 0x2200,
4361         .bcdma_bchan_ring = 0x2400,
4362         .bcdma_tchan_data = 0x2800,
4363         .bcdma_tchan_ring = 0x2a00,
4364         .bcdma_rchan_data = 0x2e00,
4365         .bcdma_rchan_ring = 0x3000,
4366         .pktdma_tchan_flow = 0x1200,
4367         .pktdma_rchan_flow = 0x1600,
4368     },
4369     .bcdma_trigger_event_offset = 0xc400,
4370 };
4371 
4372 static const struct soc_device_attribute k3_soc_devices[] = {
4373     { .family = "AM65X", .data = &am654_soc_data },
4374     { .family = "J721E", .data = &j721e_soc_data },
4375     { .family = "J7200", .data = &j7200_soc_data },
4376     { .family = "AM64X", .data = &am64_soc_data },
4377     { .family = "J721S2", .data = &j721e_soc_data},
4378     { .family = "AM62X", .data = &am64_soc_data },
4379     { /* sentinel */ }
4380 };
4381 
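/*
 * Map the "gcfg" region and read the CAP2/CAP3 (and CAP4 for PKTDMA)
 * registers to discover the channel and flow counts, then map only the
 * per-channel-type realtime ("*chanrt") regions that are present.
 */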
4382 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4383 {
4384     u32 cap2, cap3, cap4;
4385     int i;
4386 
4387     ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4388     if (IS_ERR(ud->mmrs[MMR_GCFG]))
4389         return PTR_ERR(ud->mmrs[MMR_GCFG]);
4390 
4391     cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4392     cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4393 
4394     switch (ud->match_data->type) {
4395     case DMA_TYPE_UDMA:
4396         ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4397         ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4398         ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4399         ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4400         break;
4401     case DMA_TYPE_BCDMA:
4402         ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4403         ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4404         ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4405         ud->rflow_cnt = ud->rchan_cnt;
4406         break;
4407     case DMA_TYPE_PKTDMA:
4408         cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4409         ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4410         ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4411         ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4412         ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4413         break;
4414     default:
4415         return -EINVAL;
4416     }
4417 
4418     for (i = 1; i < MMR_LAST; i++) {
4419         if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4420             continue;
4421         if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4422             continue;
4423         if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4424             continue;
4425 
4426         ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4427         if (IS_ERR(ud->mmrs[i]))
4428             return PTR_ERR(ud->mmrs[i]);
4429     }
4430 
4431     return 0;
4432 }
4433 
4434 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4435                       struct ti_sci_resource_desc *rm_desc,
4436                       char *name)
4437 {
4438     bitmap_clear(map, rm_desc->start, rm_desc->num);
4439     bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4440     dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4441         rm_desc->start, rm_desc->num, rm_desc->start_sec,
4442         rm_desc->num_sec);
4443 }
4444 
4445 static const char * const range_names[] = {
4446     [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4447     [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4448     [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4449     [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4450     [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4451 };
4452 
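/*
 * UDMA resource setup: determine the throughput level (tpl) boundaries
 * from the compatible (AM654) or the capability register, allocate the
 * channel/flow bitmaps and mark the ranges granted by TISCI as usable
 * (set bits mean "not for Linux"). Ring completion MSIs are then
 * allocated for the same tchan/rchan ranges, with rchan events offset
 * by the SoC's rchan OES value.
 */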
4453 static int udma_setup_resources(struct udma_dev *ud)
4454 {
4455     int ret, i, j;
4456     struct device *dev = ud->dev;
4457     struct ti_sci_resource *rm_res, irq_res;
4458     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4459     u32 cap3;
4460 
4461     /* Set up the throughput level start indexes */
4462     cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4463     if (of_device_is_compatible(dev->of_node,
4464                     "ti,am654-navss-main-udmap")) {
4465         ud->tchan_tpl.levels = 2;
4466         ud->tchan_tpl.start_idx[0] = 8;
4467     } else if (of_device_is_compatible(dev->of_node,
4468                        "ti,am654-navss-mcu-udmap")) {
4469         ud->tchan_tpl.levels = 2;
4470         ud->tchan_tpl.start_idx[0] = 2;
4471     } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4472         ud->tchan_tpl.levels = 3;
4473         ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4474         ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4475     } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4476         ud->tchan_tpl.levels = 2;
4477         ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4478     } else {
4479         ud->tchan_tpl.levels = 1;
4480     }
4481 
4482     ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4483     ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4484     ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4485 
4486     ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4487                        sizeof(unsigned long), GFP_KERNEL);
4488     ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4489                   GFP_KERNEL);
4490     ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4491                        sizeof(unsigned long), GFP_KERNEL);
4492     ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4493                   GFP_KERNEL);
4494     ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4495                           sizeof(unsigned long),
4496                           GFP_KERNEL);
4497     ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4498                           BITS_TO_LONGS(ud->rflow_cnt),
4499                           sizeof(unsigned long),
4500                           GFP_KERNEL);
4501     ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4502                     sizeof(unsigned long),
4503                     GFP_KERNEL);
4504     ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4505                   GFP_KERNEL);
4506 
4507     if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4508         !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4509         !ud->rflows || !ud->rflow_in_use)
4510         return -ENOMEM;
4511 
4512     /*
4513      * RX flows with the same IDs as RX channels are reserved to be used
4514      * as default flows if the remote HW can't generate flow IDs. Those
4515      * RX flows can only be requested explicitly, by ID.
4516      */
4517     bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4518 
4519     /* by default no GP rflows are assigned to Linux */
4520     bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4521 
4522     /* Get resource ranges from tisci */
4523     for (i = 0; i < RM_RANGE_LAST; i++) {
4524         if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4525             continue;
4526 
4527         tisci_rm->rm_ranges[i] =
4528             devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4529                             tisci_rm->tisci_dev_id,
4530                             (char *)range_names[i]);
4531     }
4532 
4533     /* tchan ranges */
4534     rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4535     if (IS_ERR(rm_res)) {
4536         bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4537         irq_res.sets = 1;
4538     } else {
4539         bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4540         for (i = 0; i < rm_res->sets; i++)
4541             udma_mark_resource_ranges(ud, ud->tchan_map,
4542                           &rm_res->desc[i], "tchan");
4543         irq_res.sets = rm_res->sets;
4544     }
4545 
4546     /* rchan and matching default flow ranges */
4547     rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4548     if (IS_ERR(rm_res)) {
4549         bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4550         irq_res.sets++;
4551     } else {
4552         bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4553         for (i = 0; i < rm_res->sets; i++)
4554             udma_mark_resource_ranges(ud, ud->rchan_map,
4555                           &rm_res->desc[i], "rchan");
4556         irq_res.sets += rm_res->sets;
4557     }
4558 
4559     irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4560     if (!irq_res.desc)
4561         return -ENOMEM;
4562     rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4563     if (IS_ERR(rm_res)) {
4564         irq_res.desc[0].start = 0;
4565         irq_res.desc[0].num = ud->tchan_cnt;
4566         i = 1;
4567     } else {
4568         for (i = 0; i < rm_res->sets; i++) {
4569             irq_res.desc[i].start = rm_res->desc[i].start;
4570             irq_res.desc[i].num = rm_res->desc[i].num;
4571             irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4572             irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4573         }
4574     }
4575     rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4576     if (IS_ERR(rm_res)) {
4577         irq_res.desc[i].start = 0;
4578         irq_res.desc[i].num = ud->rchan_cnt;
4579     } else {
4580         for (j = 0; j < rm_res->sets; j++, i++) {
4581             if (rm_res->desc[j].num) {
4582                 irq_res.desc[i].start = rm_res->desc[j].start +
4583                         ud->soc_data->oes.udma_rchan;
4584                 irq_res.desc[i].num = rm_res->desc[j].num;
4585             }
4586             if (rm_res->desc[j].num_sec) {
4587                 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4588                         ud->soc_data->oes.udma_rchan;
4589                 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4590             }
4591         }
4592     }
4593     ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4594     kfree(irq_res.desc);
4595     if (ret) {
4596         dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4597         return ret;
4598     }
4599 
4600     /* GP rflow ranges */
4601     rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4602     if (IS_ERR(rm_res)) {
4603         /* all gp flows are assigned exclusively to Linux */
4604         bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4605                  ud->rflow_cnt - ud->rchan_cnt);
4606     } else {
4607         for (i = 0; i < rm_res->sets; i++)
4608             udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4609                           &rm_res->desc[i], "gp-rflow");
4610     }
4611 
4612     return 0;
4613 }
4614 
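/*
 * BCDMA resource setup: same scheme as for UDMA, extended with bchans.
 * tchans and rchans each contribute two MSI event ranges (data and ring
 * completion OES), hence the doubled irq_res set counts.
 */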
4615 static int bcdma_setup_resources(struct udma_dev *ud)
4616 {
4617     int ret, i, j;
4618     struct device *dev = ud->dev;
4619     struct ti_sci_resource *rm_res, irq_res;
4620     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4621     const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4622     u32 cap;
4623 
4624     /* Set up the throughput level start indexes */
4625     cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4626     if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4627         ud->bchan_tpl.levels = 3;
4628         ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4629         ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4630     } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4631         ud->bchan_tpl.levels = 2;
4632         ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4633     } else {
4634         ud->bchan_tpl.levels = 1;
4635     }
4636 
4637     cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4638     if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4639         ud->rchan_tpl.levels = 3;
4640         ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4641         ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4642     } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4643         ud->rchan_tpl.levels = 2;
4644         ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4645     } else {
4646         ud->rchan_tpl.levels = 1;
4647     }
4648 
4649     if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4650         ud->tchan_tpl.levels = 3;
4651         ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4652         ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4653     } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4654         ud->tchan_tpl.levels = 2;
4655         ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4656     } else {
4657         ud->tchan_tpl.levels = 1;
4658     }
4659 
4660     ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4661                        sizeof(unsigned long), GFP_KERNEL);
4662     ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4663                   GFP_KERNEL);
4664     ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4665                        sizeof(unsigned long), GFP_KERNEL);
4666     ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4667                   GFP_KERNEL);
4668     ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4669                        sizeof(unsigned long), GFP_KERNEL);
4670     ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4671                   GFP_KERNEL);
4672     /* BCDMA does not really have flows, but the driver expects them */
4673     ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4674                     sizeof(unsigned long),
4675                     GFP_KERNEL);
4676     ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4677                   GFP_KERNEL);
4678 
4679     if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4680         !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4681         !ud->rflows)
4682         return -ENOMEM;
4683 
4684     /* Get resource ranges from tisci */
4685     for (i = 0; i < RM_RANGE_LAST; i++) {
4686         if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4687             continue;
4688         if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4689             continue;
4690         if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4691             continue;
4692         if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4693             continue;
4694 
4695         tisci_rm->rm_ranges[i] =
4696             devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4697                             tisci_rm->tisci_dev_id,
4698                             (char *)range_names[i]);
4699     }
4700 
4701     irq_res.sets = 0;
4702 
4703     /* bchan ranges */
4704     if (ud->bchan_cnt) {
4705         rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4706         if (IS_ERR(rm_res)) {
4707             bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4708             irq_res.sets++;
4709         } else {
4710             bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4711             for (i = 0; i < rm_res->sets; i++)
4712                 udma_mark_resource_ranges(ud, ud->bchan_map,
4713                               &rm_res->desc[i],
4714                               "bchan");
4715             irq_res.sets += rm_res->sets;
4716         }
4717     }
4718 
4719     /* tchan ranges */
4720     if (ud->tchan_cnt) {
4721         rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4722         if (IS_ERR(rm_res)) {
4723             bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4724             irq_res.sets += 2;
4725         } else {
4726             bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4727             for (i = 0; i < rm_res->sets; i++)
4728                 udma_mark_resource_ranges(ud, ud->tchan_map,
4729                               &rm_res->desc[i],
4730                               "tchan");
4731             irq_res.sets += rm_res->sets * 2;
4732         }
4733     }
4734 
4735     /* rchan ranges */
4736     if (ud->rchan_cnt) {
4737         rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4738         if (IS_ERR(rm_res)) {
4739             bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4740             irq_res.sets += 2;
4741         } else {
4742             bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4743             for (i = 0; i < rm_res->sets; i++)
4744                 udma_mark_resource_ranges(ud, ud->rchan_map,
4745                               &rm_res->desc[i],
4746                               "rchan");
4747             irq_res.sets += rm_res->sets * 2;
4748         }
4749     }
4750 
4751     irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4752     if (!irq_res.desc)
4753         return -ENOMEM;
4754     if (ud->bchan_cnt) {
4755         rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4756         if (IS_ERR(rm_res)) {
4757             irq_res.desc[0].start = oes->bcdma_bchan_ring;
4758             irq_res.desc[0].num = ud->bchan_cnt;
4759             i = 1;
4760         } else {
4761             for (i = 0; i < rm_res->sets; i++) {
4762                 irq_res.desc[i].start = rm_res->desc[i].start +
4763                             oes->bcdma_bchan_ring;
4764                 irq_res.desc[i].num = rm_res->desc[i].num;
4765             }
4766         }
4767     }
4768     if (ud->tchan_cnt) {
4769         rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4770         if (IS_ERR(rm_res)) {
4771             irq_res.desc[i].start = oes->bcdma_tchan_data;
4772             irq_res.desc[i].num = ud->tchan_cnt;
4773             irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
4774             irq_res.desc[i + 1].num = ud->tchan_cnt;
4775             i += 2;
4776         } else {
4777             for (j = 0; j < rm_res->sets; j++, i += 2) {
4778                 irq_res.desc[i].start = rm_res->desc[j].start +
4779                             oes->bcdma_tchan_data;
4780                 irq_res.desc[i].num = rm_res->desc[j].num;
4781 
4782                 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4783                             oes->bcdma_tchan_ring;
4784                 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4785             }
4786         }
4787     }
4788     if (ud->rchan_cnt) {
4789         rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4790         if (IS_ERR(rm_res)) {
4791             irq_res.desc[i].start = oes->bcdma_rchan_data;
4792             irq_res.desc[i].num = ud->rchan_cnt;
4793             irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
4794             irq_res.desc[i + 1].num = ud->rchan_cnt;
4795             i += 2;
4796         } else {
4797             for (j = 0; j < rm_res->sets; j++, i += 2) {
4798                 irq_res.desc[i].start = rm_res->desc[j].start +
4799                             oes->bcdma_rchan_data;
4800                 irq_res.desc[i].num = rm_res->desc[j].num;
4801 
4802                 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4803                             oes->bcdma_rchan_ring;
4804                 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4805             }
4806         }
4807     }
4808 
4809     ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4810     kfree(irq_res.desc);
4811     if (ret) {
4812         dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4813         return ret;
4814     }
4815 
4816     return 0;
4817 }
4818 
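/*
 * PKTDMA resource setup: events are tied to the mapped tx/rx flows
 * rather than to the channels, so the MSI events are allocated from the
 * tflow and rflow ranges, offset by the PKTDMA flow OES values.
 */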
4819 static int pktdma_setup_resources(struct udma_dev *ud)
4820 {
4821     int ret, i, j;
4822     struct device *dev = ud->dev;
4823     struct ti_sci_resource *rm_res, irq_res;
4824     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4825     const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4826     u32 cap3;
4827 
4828     /* Set up the throughput level start indexes */
4829     cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4830     if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4831         ud->tchan_tpl.levels = 3;
4832         ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4833         ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4834     } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4835         ud->tchan_tpl.levels = 2;
4836         ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4837     } else {
4838         ud->tchan_tpl.levels = 1;
4839     }
4840 
4841     ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4842     ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4843     ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4844 
4845     ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4846                        sizeof(unsigned long), GFP_KERNEL);
4847     ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4848                   GFP_KERNEL);
4849     ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4850                        sizeof(unsigned long), GFP_KERNEL);
4851     ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4852                   GFP_KERNEL);
4853     ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4854                     sizeof(unsigned long),
4855                     GFP_KERNEL);
4856     ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4857                   GFP_KERNEL);
4858     ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4859                        sizeof(unsigned long), GFP_KERNEL);
4860 
4861     if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4862         !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4863         return -ENOMEM;
4864 
4865     /* Get resource ranges from tisci */
4866     for (i = 0; i < RM_RANGE_LAST; i++) {
        if (i == RM_RANGE_BCHAN)
            continue;

        tisci_rm->rm_ranges[i] =
            devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
                            tisci_rm->tisci_dev_id,
                            (char *)range_names[i]);
    }

    /* tchan ranges */
    rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
    if (IS_ERR(rm_res)) {
        bitmap_zero(ud->tchan_map, ud->tchan_cnt);
    } else {
        bitmap_fill(ud->tchan_map, ud->tchan_cnt);
        for (i = 0; i < rm_res->sets; i++)
            udma_mark_resource_ranges(ud, ud->tchan_map,
                          &rm_res->desc[i], "tchan");
    }

    /* rchan ranges */
    rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
    if (IS_ERR(rm_res)) {
        bitmap_zero(ud->rchan_map, ud->rchan_cnt);
    } else {
        bitmap_fill(ud->rchan_map, ud->rchan_cnt);
        for (i = 0; i < rm_res->sets; i++)
            udma_mark_resource_ranges(ud, ud->rchan_map,
                          &rm_res->desc[i], "rchan");
    }

    /* rflow ranges */
    rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
    if (IS_ERR(rm_res)) {
        /* all rflows are assigned exclusively to Linux */
        bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
        irq_res.sets = 1;
    } else {
        bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
        for (i = 0; i < rm_res->sets; i++)
            udma_mark_resource_ranges(ud, ud->rflow_in_use,
                          &rm_res->desc[i], "rflow");
        irq_res.sets = rm_res->sets;
    }

    /* tflow ranges */
    rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
    if (IS_ERR(rm_res)) {
        /* all tflows are assigned exclusively to Linux */
        bitmap_zero(ud->tflow_map, ud->tflow_cnt);
        irq_res.sets++;
    } else {
        bitmap_fill(ud->tflow_map, ud->tflow_cnt);
        for (i = 0; i < rm_res->sets; i++)
            udma_mark_resource_ranges(ud, ud->tflow_map,
                          &rm_res->desc[i], "tflow");
        irq_res.sets += rm_res->sets;
    }

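    /*
     * Build the MSI event list: one range per tflow set followed by one
     * per rflow set, offset by this SoC's PKTDMA output event offsets.
     */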
    irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
    if (!irq_res.desc)
        return -ENOMEM;
    rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
    if (IS_ERR(rm_res)) {
        irq_res.desc[0].start = oes->pktdma_tchan_flow;
        irq_res.desc[0].num = ud->tflow_cnt;
        i = 1;
    } else {
        for (i = 0; i < rm_res->sets; i++) {
            irq_res.desc[i].start = rm_res->desc[i].start +
                        oes->pktdma_tchan_flow;
            irq_res.desc[i].num = rm_res->desc[i].num;
        }
    }
    rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
    if (IS_ERR(rm_res)) {
        irq_res.desc[i].start = oes->pktdma_rchan_flow;
        irq_res.desc[i].num = ud->rflow_cnt;
    } else {
        for (j = 0; j < rm_res->sets; j++, i++) {
            irq_res.desc[i].start = rm_res->desc[j].start +
                        oes->pktdma_rchan_flow;
            irq_res.desc[i].num = rm_res->desc[j].num;
        }
    }
    ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
    kfree(irq_res.desc);
    if (ret) {
        dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
        return ret;
    }

    return 0;
}

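/*
 * Run the DMA-type specific resource setup and return the number of
 * channels usable by Linux (or a negative errno on failure).
 */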
static int setup_resources(struct udma_dev *ud)
{
    struct device *dev = ud->dev;
    int ch_count, ret;

    switch (ud->match_data->type) {
    case DMA_TYPE_UDMA:
        ret = udma_setup_resources(ud);
        break;
    case DMA_TYPE_BCDMA:
        ret = bcdma_setup_resources(ud);
        break;
    case DMA_TYPE_PKTDMA:
        ret = pktdma_setup_resources(ud);
        break;
    default:
        return -EINVAL;
    }

    if (ret)
        return ret;

    ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
    if (ud->bchan_cnt)
        ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
    ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
    ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
    if (!ch_count)
        return -ENODEV;

    ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
                    GFP_KERNEL);
    if (!ud->channels)
        return -ENOMEM;

    switch (ud->match_data->type) {
    case DMA_TYPE_UDMA:
        dev_info(dev,
             "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
             ch_count,
             ud->tchan_cnt - bitmap_weight(ud->tchan_map,
                               ud->tchan_cnt),
             ud->rchan_cnt - bitmap_weight(ud->rchan_map,
                               ud->rchan_cnt),
             ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
                               ud->rflow_cnt));
        break;
    case DMA_TYPE_BCDMA:
        dev_info(dev,
             "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
             ch_count,
             ud->bchan_cnt - bitmap_weight(ud->bchan_map,
                               ud->bchan_cnt),
             ud->tchan_cnt - bitmap_weight(ud->tchan_map,
                               ud->tchan_cnt),
             ud->rchan_cnt - bitmap_weight(ud->rchan_map,
                               ud->rchan_cnt));
        break;
    case DMA_TYPE_PKTDMA:
        dev_info(dev,
             "Channels: %d (tchan: %u, rchan: %u)\n",
             ch_count,
             ud->tchan_cnt - bitmap_weight(ud->tchan_map,
                               ud->tchan_cnt),
             ud->rchan_cnt - bitmap_weight(ud->rchan_map,
                               ud->rchan_cnt));
        break;
    default:
        break;
    }

    return ch_count;
}

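/*
 * Pre-build the two descriptors used to flush stale data out of an RX
 * channel during teardown: one TR-mode and one packet-mode descriptor,
 * both pointing at a shared 1K scratch buffer for the discarded data.
 */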
static int udma_setup_rx_flush(struct udma_dev *ud)
{
    struct udma_rx_flush *rx_flush = &ud->rx_flush;
    struct cppi5_desc_hdr_t *tr_desc;
    struct cppi5_tr_type1_t *tr_req;
    struct cppi5_host_desc_t *desc;
    struct device *dev = ud->dev;
    struct udma_hwdesc *hwdesc;
    size_t tr_size;

    /* Allocate 1K buffer for discarded data on RX channel teardown */
    rx_flush->buffer_size = SZ_1K;
    rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
                          GFP_KERNEL);
    if (!rx_flush->buffer_vaddr)
        return -ENOMEM;

    rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
                        rx_flush->buffer_size,
                        DMA_TO_DEVICE);
    if (dma_mapping_error(dev, rx_flush->buffer_paddr))
        return -ENOMEM;

    /* Set up descriptor to be used for TR mode */
    hwdesc = &rx_flush->hwdescs[0];
    tr_size = sizeof(struct cppi5_tr_type1_t);
    hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
    hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
                    ud->desc_align);

    hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
                        GFP_KERNEL);
    if (!hwdesc->cppi5_desc_vaddr)
        return -ENOMEM;

    hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
                          hwdesc->cppi5_desc_size,
                          DMA_TO_DEVICE);
    if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
        return -ENOMEM;

    /* Start of the TR req records */
    hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
    /* Start address of the TR response array */
    hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

    tr_desc = hwdesc->cppi5_desc_vaddr;
    cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
    cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
    cppi5_desc_set_retpolicy(tr_desc, 0, 0);

    tr_req = hwdesc->tr_req_base;
    cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
              CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
    cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

    tr_req->addr = rx_flush->buffer_paddr;
    tr_req->icnt0 = rx_flush->buffer_size;
    tr_req->icnt1 = 1;

    dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
                   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

    /* Set up descriptor to be used for packet mode */
    hwdesc = &rx_flush->hwdescs[1];
    hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
                    CPPI5_INFO0_HDESC_EPIB_SIZE +
                    CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
                    ud->desc_align);

    hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
                        GFP_KERNEL);
    if (!hwdesc->cppi5_desc_vaddr)
        return -ENOMEM;

    hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
                          hwdesc->cppi5_desc_size,
                          DMA_TO_DEVICE);
    if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
        return -ENOMEM;

    desc = hwdesc->cppi5_desc_vaddr;
    cppi5_hdesc_init(desc, 0, 0);
    cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
    cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

    cppi5_hdesc_attach_buf(desc,
                   rx_flush->buffer_paddr, rx_flush->buffer_size,
                   rx_flush->buffer_paddr, rx_flush->buffer_size);

    dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
                   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
    return 0;
}

#ifdef CONFIG_DEBUG_FS
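/* Print a one-line summary of an in-use channel for the debugfs summary */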
static void udma_dbg_summary_show_chan(struct seq_file *s,
                       struct dma_chan *chan)
{
    struct udma_chan *uc = to_udma_chan(chan);
    struct udma_chan_config *ucc = &uc->config;

    seq_printf(s, " %-13s| %s", dma_chan_name(chan),
           chan->dbg_client_name ?: "in-use");
    if (ucc->tr_trigger_type)
        seq_puts(s, " (triggered, ");
    else
        seq_printf(s, " (%s, ",
               dmaengine_get_direction_text(uc->config.dir));

    switch (uc->config.dir) {
    case DMA_MEM_TO_MEM:
        if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
            seq_printf(s, "bchan%d)\n", uc->bchan->id);
            return;
        }

        seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
               ucc->src_thread, ucc->dst_thread);
        break;
    case DMA_DEV_TO_MEM:
        seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
               ucc->src_thread, ucc->dst_thread);
        if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
            seq_printf(s, "rflow%d, ", uc->rflow->id);
        break;
    case DMA_MEM_TO_DEV:
        seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
               ucc->src_thread, ucc->dst_thread);
        if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
            seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
        break;
    default:
        seq_puts(s, ")\n");
        return;
    }

    if (ucc->ep_type == PSIL_EP_NATIVE) {
        seq_puts(s, "PSI-L Native");
        if (ucc->metadata_size) {
            seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
            if (ucc->psd_size)
                seq_printf(s, " PSDsize:%u", ucc->psd_size);
            seq_puts(s, " ]");
        }
    } else {
        seq_puts(s, "PDMA");
        if (ucc->enable_acc32 || ucc->enable_burst)
            seq_printf(s, "[%s%s ]",
                   ucc->enable_acc32 ? " ACC32" : "",
                   ucc->enable_burst ? " BURST" : "");
    }

    seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
                  struct dma_device *dma_dev)
{
    struct dma_chan *chan;

    list_for_each_entry(chan, &dma_dev->channels, device_node) {
        if (chan->client_count)
            udma_dbg_summary_show_chan(s, chan);
    }
}
#endif /* CONFIG_DEBUG_FS */

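/*
 * The memcpy alignment requirement follows the burst size configured for
 * the highest-TPL channel that can be used for memcpy.
 */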
static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
{
    const struct udma_match_data *match_data = ud->match_data;
    u8 tpl;

    if (!match_data->enable_memcpy_support)
        return DMAENGINE_ALIGN_8_BYTES;

    /* Get the highest TPL level the device supports for memcpy */
    if (ud->bchan_cnt)
        tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
    else if (ud->tchan_cnt)
        tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
    else
        return DMAENGINE_ALIGN_8_BYTES;

    switch (match_data->burst_size[tpl]) {
    case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
        return DMAENGINE_ALIGN_256_BYTES;
    case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
        return DMAENGINE_ALIGN_128_BYTES;
    case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
    default:
        return DMAENGINE_ALIGN_64_BYTES;
    }
}

#define TI_UDMAC_BUSWIDTHS  (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
                 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

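/*
 * Shared probe for the UDMA, BCDMA and PKTDMA platform drivers; the OF
 * match data selects the type specific resource setup and channel ops.
 */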
static int udma_probe(struct platform_device *pdev)
{
    struct device_node *navss_node = pdev->dev.parent->of_node;
    const struct soc_device_attribute *soc;
    struct device *dev = &pdev->dev;
    struct udma_dev *ud;
    const struct of_device_id *match;
    int i, ret;
    int ch_count;

    ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
    if (ret)
        dev_err(dev, "failed to set DMA mask\n");

    ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
    if (!ud)
        return -ENOMEM;

    match = of_match_node(udma_of_match, dev->of_node);
    if (!match)
        match = of_match_node(bcdma_of_match, dev->of_node);
    if (!match) {
        match = of_match_node(pktdma_of_match, dev->of_node);
        if (!match) {
            dev_err(dev, "No compatible match found\n");
            return -ENODEV;
        }
    }
    ud->match_data = match->data;

    soc = soc_device_match(k3_soc_devices);
    if (!soc) {
        dev_err(dev, "No compatible SoC found\n");
        return -ENODEV;
    }
    ud->soc_data = soc->data;

    ret = udma_get_mmrs(pdev, ud);
    if (ret)
        return ret;

    ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
    if (IS_ERR(ud->tisci_rm.tisci))
        return PTR_ERR(ud->tisci_rm.tisci);

    ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
                   &ud->tisci_rm.tisci_dev_id);
    if (ret) {
        dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
        return ret;
    }
    pdev->id = ud->tisci_rm.tisci_dev_id;

    ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
                   &ud->tisci_rm.tisci_navss_dev_id);
    if (ret) {
        dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
        return ret;
    }

    if (ud->match_data->type == DMA_TYPE_UDMA) {
        ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
                       &ud->atype);
        if (!ret && ud->atype > 2) {
            dev_err(dev, "Invalid atype: %u\n", ud->atype);
            return -EINVAL;
        }
    } else {
        ret = of_property_read_u32(dev->of_node, "ti,asel",
                       &ud->asel);
        if (!ret && ud->asel > 15) {
            dev_err(dev, "Invalid asel: %u\n", ud->asel);
            return -EINVAL;
        }
    }

    ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
    ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

    if (ud->match_data->type == DMA_TYPE_UDMA) {
        ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
    } else {
        struct k3_ringacc_init_data ring_init_data;

        ring_init_data.tisci = ud->tisci_rm.tisci;
        ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
        if (ud->match_data->type == DMA_TYPE_BCDMA) {
            ring_init_data.num_rings = ud->bchan_cnt +
                           ud->tchan_cnt +
                           ud->rchan_cnt;
        } else {
            ring_init_data.num_rings = ud->rflow_cnt +
                           ud->tflow_cnt;
        }

        ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
    }

    if (IS_ERR(ud->ringacc))
        return PTR_ERR(ud->ringacc);

    dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
                        DOMAIN_BUS_TI_SCI_INTA_MSI);
    if (!dev->msi.domain) {
        dev_err(dev, "Failed to get MSI domain\n");
        return -EPROBE_DEFER;
    }

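    /* Set up the dmaengine capabilities and the common callbacks */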
    dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
    /* cyclic operation is not supported via PKTDMA */
    if (ud->match_data->type != DMA_TYPE_PKTDMA) {
        dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
        ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
    }

    ud->ddev.device_config = udma_slave_config;
    ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
    ud->ddev.device_issue_pending = udma_issue_pending;
    ud->ddev.device_tx_status = udma_tx_status;
    ud->ddev.device_pause = udma_pause;
    ud->ddev.device_resume = udma_resume;
    ud->ddev.device_terminate_all = udma_terminate_all;
    ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
    ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

    switch (ud->match_data->type) {
    case DMA_TYPE_UDMA:
        ud->ddev.device_alloc_chan_resources =
                    udma_alloc_chan_resources;
        break;
    case DMA_TYPE_BCDMA:
        ud->ddev.device_alloc_chan_resources =
                    bcdma_alloc_chan_resources;
        ud->ddev.device_router_config = bcdma_router_config;
        break;
    case DMA_TYPE_PKTDMA:
        ud->ddev.device_alloc_chan_resources =
                    pktdma_alloc_chan_resources;
        break;
    default:
        return -EINVAL;
    }
    ud->ddev.device_free_chan_resources = udma_free_chan_resources;

    ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
    ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
    ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
    ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
    ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
                       DESC_METADATA_ENGINE;
    if (ud->match_data->enable_memcpy_support &&
        !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
        dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
        ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
        ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
    }

    ud->ddev.dev = dev;
    ud->dev = dev;
    ud->psil_base = ud->match_data->psil_base;

    INIT_LIST_HEAD(&ud->ddev.channels);
    INIT_LIST_HEAD(&ud->desc_to_purge);

    ch_count = setup_resources(ud);
    if (ch_count <= 0)
        return ch_count;

    spin_lock_init(&ud->lock);
    INIT_WORK(&ud->purge_work, udma_purge_desc_work);

    ud->desc_align = 64;
    if (ud->desc_align < dma_get_cache_alignment())
        ud->desc_align = dma_get_cache_alignment();

    ret = udma_setup_rx_flush(ud);
    if (ret)
        return ret;

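    /* Assign IDs and real-time register bases to the hardware channels */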
    for (i = 0; i < ud->bchan_cnt; i++) {
        struct udma_bchan *bchan = &ud->bchans[i];

        bchan->id = i;
        bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
    }

    for (i = 0; i < ud->tchan_cnt; i++) {
        struct udma_tchan *tchan = &ud->tchans[i];

        tchan->id = i;
        tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
    }

    for (i = 0; i < ud->rchan_cnt; i++) {
        struct udma_rchan *rchan = &ud->rchans[i];

        rchan->id = i;
        rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
    }

    for (i = 0; i < ud->rflow_cnt; i++) {
        struct udma_rflow *rflow = &ud->rflows[i];

        rflow->id = i;
    }

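    /*
     * Virtual channels start out unbound: no bchan/tchan/rchan and a
     * MEM_TO_MEM default direction until a client configures them.
     */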
    for (i = 0; i < ch_count; i++) {
        struct udma_chan *uc = &ud->channels[i];

        uc->ud = ud;
        uc->vc.desc_free = udma_desc_free;
        uc->id = i;
        uc->bchan = NULL;
        uc->tchan = NULL;
        uc->rchan = NULL;
        uc->config.remote_thread_id = -1;
        uc->config.mapped_channel_id = -1;
        uc->config.default_flow_id = -1;
        uc->config.dir = DMA_MEM_TO_MEM;
        uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
                      dev_name(dev), i);

        vchan_init(&uc->vc, &ud->ddev);
        /* Use custom vchan completion handling */
        tasklet_setup(&uc->vc.task, udma_vchan_complete);
        init_completion(&uc->teardown_completed);
        INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
    }

    /* Configure the copy_align to the maximum burst size the device supports */
    ud->ddev.copy_align = udma_get_copy_align(ud);

    ret = dma_async_device_register(&ud->ddev);
    if (ret) {
        dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
        return ret;
    }

    platform_set_drvdata(pdev, ud);

    ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
    if (ret) {
        dev_err(dev, "failed to register of_dma controller\n");
        dma_async_device_unregister(&ud->ddev);
    }

    return ret;
}

static struct platform_driver udma_driver = {
    .driver = {
        .name   = "ti-udma",
        .of_match_table = udma_of_match,
        .suppress_bind_attrs = true,
    },
    .probe      = udma_probe,
};
builtin_platform_driver(udma_driver);

static struct platform_driver bcdma_driver = {
    .driver = {
        .name   = "ti-bcdma",
        .of_match_table = bcdma_of_match,
        .suppress_bind_attrs = true,
    },
    .probe      = udma_probe,
};
builtin_platform_driver(bcdma_driver);

static struct platform_driver pktdma_driver = {
    .driver = {
        .name   = "ti-pktdma",
        .of_match_table = pktdma_of_match,
        .suppress_bind_attrs = true,
    },
    .probe      = udma_probe,
};
builtin_platform_driver(pktdma_driver);

/* Private interfaces to UDMA */
#include "k3-udma-private.c"