0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Texas Instruments CPDMA Driver
0004  *
0005  * Copyright (C) 2010 Texas Instruments
0006  *
0007  */
0008 #include <linux/kernel.h>
0009 #include <linux/spinlock.h>
0010 #include <linux/device.h>
0011 #include <linux/module.h>
0012 #include <linux/slab.h>
0013 #include <linux/err.h>
0014 #include <linux/dma-mapping.h>
0015 #include <linux/io.h>
0016 #include <linux/delay.h>
0017 #include <linux/genalloc.h>
0018 #include "davinci_cpdma.h"
0019 
0020 /* DMA Registers */
0021 #define CPDMA_TXIDVER       0x00
0022 #define CPDMA_TXCONTROL     0x04
0023 #define CPDMA_TXTEARDOWN    0x08
0024 #define CPDMA_RXIDVER       0x10
0025 #define CPDMA_RXCONTROL     0x14
0026 #define CPDMA_RXTEARDOWN    0x18
0027 #define CPDMA_SOFTRESET     0x1c
0028 #define CPDMA_TX_PRI0_RATE  0x30
0029 #define CPDMA_TXINTSTATRAW  0x80
0030 #define CPDMA_TXINTSTATMASKED   0x84
0031 #define CPDMA_TXINTMASKSET  0x88
0032 #define CPDMA_TXINTMASKCLEAR    0x8c
0033 #define CPDMA_MACINVECTOR   0x90
0034 #define CPDMA_MACEOIVECTOR  0x94
0035 #define CPDMA_RXINTSTATRAW  0xa0
0036 #define CPDMA_RXINTSTATMASKED   0xa4
0037 #define CPDMA_RXINTMASKSET  0xa8
0038 #define CPDMA_RXINTMASKCLEAR    0xac
0039 #define CPDMA_DMAINTSTATRAW 0xb0
0040 #define CPDMA_DMAINTSTATMASKED  0xb4
0041 #define CPDMA_DMAINTMASKSET 0xb8
0042 #define CPDMA_DMAINTMASKCLEAR   0xbc
0043 #define CPDMA_DMAINT_HOSTERR    BIT(1)
0044 
0045 /* the following exist only if has_ext_regs is set */
0046 #define CPDMA_DMACONTROL    0x20
0047 #define CPDMA_DMASTATUS     0x24
0048 #define CPDMA_RXBUFFOFS     0x28
0049 #define CPDMA_EM_CONTROL    0x2c
0050 
0051 /* Descriptor mode bits */
0052 #define CPDMA_DESC_SOP      BIT(31)
0053 #define CPDMA_DESC_EOP      BIT(30)
0054 #define CPDMA_DESC_OWNER    BIT(29)
0055 #define CPDMA_DESC_EOQ      BIT(28)
0056 #define CPDMA_DESC_TD_COMPLETE  BIT(27)
0057 #define CPDMA_DESC_PASS_CRC BIT(26)
0058 #define CPDMA_DESC_TO_PORT_EN   BIT(20)
0059 #define CPDMA_TO_PORT_SHIFT 16
0060 #define CPDMA_DESC_PORT_MASK    (BIT(18) | BIT(17) | BIT(16))
0061 #define CPDMA_DESC_CRC_LEN  4
0062 
0063 #define CPDMA_TEARDOWN_VALUE    0xfffffffc
0064 
0065 #define CPDMA_MAX_RLIM_CNT  16384
0066 
0067 struct cpdma_desc {
0068     /* hardware fields */
0069     u32         hw_next;
0070     u32         hw_buffer;
0071     u32         hw_len;
0072     u32         hw_mode;
0073     /* software fields */
0074     void            *sw_token;
0075     u32         sw_buffer;
0076     u32         sw_len;
0077 };
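
/* Illustrative note (added for clarity, not part of the original source):
 * for the common single-fragment packet, cpdma_chan_submit_si() below
 * fills in the hardware fields roughly as
 *
 *        hw_next   = 0                 (end of chain)
 *        hw_buffer = mapped buffer address
 *        hw_len    = len
 *        hw_mode   = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP | len
 *
 * and completion is detected in __cpdma_chan_process() by the hardware
 * having cleared CPDMA_DESC_OWNER again.
 */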
0078 
0079 struct cpdma_desc_pool {
0080     phys_addr_t     phys;
0081     dma_addr_t      hw_addr;
0082     void __iomem        *iomap;     /* ioremap map */
0083     void            *cpumap;    /* dma_alloc map */
0084     int         desc_size, mem_size;
0085     int         num_desc;
0086     struct device       *dev;
0087     struct gen_pool     *gen_pool;
0088 };
0089 
0090 enum cpdma_state {
0091     CPDMA_STATE_IDLE,
0092     CPDMA_STATE_ACTIVE,
0093     CPDMA_STATE_TEARDOWN,
0094 };
0095 
0096 struct cpdma_ctlr {
0097     enum cpdma_state    state;
0098     struct cpdma_params params;
0099     struct device       *dev;
0100     struct cpdma_desc_pool  *pool;
0101     spinlock_t      lock;
0102     struct cpdma_chan   *channels[2 * CPDMA_MAX_CHANNELS];
0103     int         chan_num;
0104     int         num_rx_desc; /* number of RX descriptors */
0105     int         num_tx_desc; /* number of TX descriptors */
0106 };
0107 
0108 struct cpdma_chan {
0109     struct cpdma_desc __iomem   *head, *tail;
0110     void __iomem            *hdp, *cp, *rxfree;
0111     enum cpdma_state        state;
0112     struct cpdma_ctlr       *ctlr;
0113     int             chan_num;
0114     spinlock_t          lock;
0115     int             count;
0116     u32             desc_num;
0117     u32             mask;
0118     cpdma_handler_fn        handler;
0119     enum dma_data_direction     dir;
0120     struct cpdma_chan_stats     stats;
0121     /* offsets into dmaregs */
0122     int int_set, int_clear, td;
0123     int             weight;
0124     u32             rate_factor;
0125     u32             rate;
0126 };
0127 
0128 struct cpdma_control_info {
0129     u32     reg;
0130     u32     shift, mask;
0131     int     access;
0132 #define ACCESS_RO   BIT(0)
0133 #define ACCESS_WO   BIT(1)
0134 #define ACCESS_RW   (ACCESS_RO | ACCESS_WO)
0135 };
0136 
0137 struct submit_info {
0138     struct cpdma_chan *chan;
0139     int directed;
0140     void *token;
0141     void *data_virt;
0142     dma_addr_t data_dma;
0143     int len;
0144 };
0145 
0146 static struct cpdma_control_info controls[] = {
0147     [CPDMA_TX_RLIM]       = {CPDMA_DMACONTROL,  8,  0xffff, ACCESS_RW},
0148     [CPDMA_CMD_IDLE]      = {CPDMA_DMACONTROL,  3,  1,      ACCESS_WO},
0149     [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,  4,  1,      ACCESS_RW},
0150     [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,  2,  1,      ACCESS_RW},
0151     [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,  1,  1,      ACCESS_RW},
0152     [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL,  0,  1,      ACCESS_RW},
0153     [CPDMA_STAT_IDLE]     = {CPDMA_DMASTATUS,   31, 1,      ACCESS_RO},
0154     [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,   20, 0xf,    ACCESS_RW},
0155     [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,   16, 0x7,    ACCESS_RW},
0156     [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,   12, 0xf,    ACCESS_RW},
0157     [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,   8,  0x7,    ACCESS_RW},
0158     [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,   0,  0xffff, ACCESS_RW},
0159 };
0160 
0161 #define tx_chan_num(chan)   (chan)
0162 #define rx_chan_num(chan)   ((chan) + CPDMA_MAX_CHANNELS)
0163 #define is_rx_chan(chan)    ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
0164 #define is_tx_chan(chan)    (!is_rx_chan(chan))
0165 #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
0166 #define chan_linear(chan)   __chan_linear((chan)->chan_num)
0167 
0168 /* The following make access to common cpdma_ctlr params more readable */
0169 #define dmaregs     params.dmaregs
0170 #define num_chan    params.num_chan
0171 
0172 /* various accessors */
0173 #define dma_reg_read(ctlr, ofs)     readl((ctlr)->dmaregs + (ofs))
0174 #define chan_read(chan, fld)        readl((chan)->fld)
0175 #define desc_read(desc, fld)        readl(&(desc)->fld)
0176 #define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs))
0177 #define chan_write(chan, fld, v)    writel(v, (chan)->fld)
0178 #define desc_write(desc, fld, v)    writel((u32)(v), &(desc)->fld)
0179 
0180 #define cpdma_desc_to_port(chan, mode, directed)            \
0181     do {                                \
0182         if (!is_rx_chan(chan) && ((directed == 1) ||        \
0183                       (directed == 2)))     \
0184             mode |= (CPDMA_DESC_TO_PORT_EN |        \
0185                  (directed << CPDMA_TO_PORT_SHIFT));    \
0186     } while (0)
0187 
0188 #define CPDMA_DMA_EXT_MAP       BIT(16)
0189 
0190 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
0191 {
0192     struct cpdma_desc_pool *pool = ctlr->pool;
0193 
0194     if (!pool)
0195         return;
0196 
0197     WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
0198          "cpdma_desc_pool size %zd != avail %zd",
0199          gen_pool_size(pool->gen_pool),
0200          gen_pool_avail(pool->gen_pool));
0201     if (pool->cpumap)
0202         dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
0203                   pool->phys);
0204 }
0205 
0206 /*
0207  * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
0208  * emac) have dedicated on-chip memory for these descriptors.  Some other
0209  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
0210  * abstract out these details.
0211  */
0212 static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
0213 {
0214     struct cpdma_params *cpdma_params = &ctlr->params;
0215     struct cpdma_desc_pool *pool;
0216     int ret = -ENOMEM;
0217 
0218     pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
0219     if (!pool)
0220         goto gen_pool_create_fail;
0221     ctlr->pool = pool;
0222 
0223     pool->mem_size  = cpdma_params->desc_mem_size;
0224     pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
0225                 cpdma_params->desc_align);
0226     pool->num_desc  = pool->mem_size / pool->desc_size;
0227 
0228     if (cpdma_params->descs_pool_size) {
0229         /* Recalculate the memory size required for the cpdma
0230          * descriptor pool based on the number of descriptors
0231          * specified by the user; if that size exceeds the CPPI
0232          * internal RAM size (desc_mem_size), switch to DDR.
0233          */
0234         pool->num_desc = cpdma_params->descs_pool_size;
0235         pool->mem_size = pool->desc_size * pool->num_desc;
0236         if (pool->mem_size > cpdma_params->desc_mem_size)
0237             cpdma_params->desc_mem_phys = 0;
0238     }
0239 
0240     pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
0241                           -1, "cpdma");
0242     if (IS_ERR(pool->gen_pool)) {
0243         ret = PTR_ERR(pool->gen_pool);
0244         dev_err(ctlr->dev, "pool create failed %d\n", ret);
0245         goto gen_pool_create_fail;
0246     }
0247 
0248     if (cpdma_params->desc_mem_phys) {
0249         pool->phys  = cpdma_params->desc_mem_phys;
0250         pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
0251                        pool->mem_size);
0252         pool->hw_addr = cpdma_params->desc_hw_addr;
0253     } else {
0254         pool->cpumap = dma_alloc_coherent(ctlr->dev,  pool->mem_size,
0255                           &pool->hw_addr, GFP_KERNEL);
0256         pool->iomap = (void __iomem __force *)pool->cpumap;
0257         pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
0258     }
0259 
0260     if (!pool->iomap)
0261         goto gen_pool_create_fail;
0262 
0263     ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
0264                 pool->phys, pool->mem_size, -1);
0265     if (ret < 0) {
0266         dev_err(ctlr->dev, "pool add failed %d\n", ret);
0267         goto gen_pool_add_virt_fail;
0268     }
0269 
0270     return 0;
0271 
0272 gen_pool_add_virt_fail:
0273     cpdma_desc_pool_destroy(ctlr);
0274 gen_pool_create_fail:
0275     ctlr->pool = NULL;
0276     return ret;
0277 }
0278 
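/* Worked example (illustrative, not part of the original source): with
 * hypothetical parameters desc_mem_size = 8192 bytes and desc_align = 16,
 * the default geometry above becomes
 *
 *        desc_size = ALIGN(sizeof(struct cpdma_desc), 16) = 32
 *        num_desc  = 8192 / 32 = 256
 *
 * If the user then requests descs_pool_size = 1024, mem_size grows to
 * 1024 * 32 = 32768 bytes, which no longer fits the 8192-byte CPPI RAM,
 * so desc_mem_phys is cleared and the pool falls back to DDR memory
 * obtained with dma_alloc_coherent().
 */
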
0279 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
0280           struct cpdma_desc __iomem *desc)
0281 {
0282     if (!desc)
0283         return 0;
0284     return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
0285 }
0286 
0287 static inline struct cpdma_desc __iomem *
0288 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
0289 {
0290     return dma ? pool->iomap + dma - pool->hw_addr : NULL;
0291 }
0292 
0293 static struct cpdma_desc __iomem *
0294 cpdma_desc_alloc(struct cpdma_desc_pool *pool)
0295 {
0296     return (struct cpdma_desc __iomem *)
0297         gen_pool_alloc(pool->gen_pool, pool->desc_size);
0298 }
0299 
0300 static void cpdma_desc_free(struct cpdma_desc_pool *pool,
0301                 struct cpdma_desc __iomem *desc, int num_desc)
0302 {
0303     gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
0304 }
0305 
0306 static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
0307 {
0308     struct cpdma_control_info *info = &controls[control];
0309     u32 val;
0310 
0311     if (!ctlr->params.has_ext_regs)
0312         return -ENOTSUPP;
0313 
0314     if (ctlr->state != CPDMA_STATE_ACTIVE)
0315         return -EINVAL;
0316 
0317     if (control < 0 || control >= ARRAY_SIZE(controls))
0318         return -ENOENT;
0319 
0320     if ((info->access & ACCESS_WO) != ACCESS_WO)
0321         return -EPERM;
0322 
0323     val  = dma_reg_read(ctlr, info->reg);
0324     val &= ~(info->mask << info->shift);
0325     val |= (value & info->mask) << info->shift;
0326     dma_reg_write(ctlr, info->reg, val);
0327 
0328     return 0;
0329 }
0330 
0331 static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
0332 {
0333     struct cpdma_control_info *info = &controls[control];
0334     int ret;
0335 
0336     if (!ctlr->params.has_ext_regs)
0337         return -ENOTSUPP;
0338 
0339     if (ctlr->state != CPDMA_STATE_ACTIVE)
0340         return -EINVAL;
0341 
0342     if (control < 0 || control >= ARRAY_SIZE(controls))
0343         return -ENOENT;
0344 
0345     if ((info->access & ACCESS_RO) != ACCESS_RO)
0346         return -EPERM;
0347 
0348     ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
0349     return ret;
0350 }
0351 
0352 /* cpdma_chan_set_chan_shaper - set shaper for a channel
0353  * Has to be called under ctlr lock
0354  */
0355 static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
0356 {
0357     struct cpdma_ctlr *ctlr = chan->ctlr;
0358     u32 rate_reg;
0359     u32 rmask;
0360     int ret;
0361 
0362     if (!chan->rate)
0363         return 0;
0364 
0365     rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
0366     dma_reg_write(ctlr, rate_reg, chan->rate_factor);
0367 
0368     rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
0369     rmask |= chan->mask;
0370 
0371     ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
0372     return ret;
0373 }
0374 
0375 static int cpdma_chan_on(struct cpdma_chan *chan)
0376 {
0377     struct cpdma_ctlr *ctlr = chan->ctlr;
0378     struct cpdma_desc_pool  *pool = ctlr->pool;
0379     unsigned long flags;
0380 
0381     spin_lock_irqsave(&chan->lock, flags);
0382     if (chan->state != CPDMA_STATE_IDLE) {
0383         spin_unlock_irqrestore(&chan->lock, flags);
0384         return -EBUSY;
0385     }
0386     if (ctlr->state != CPDMA_STATE_ACTIVE) {
0387         spin_unlock_irqrestore(&chan->lock, flags);
0388         return -EINVAL;
0389     }
0390     dma_reg_write(ctlr, chan->int_set, chan->mask);
0391     chan->state = CPDMA_STATE_ACTIVE;
0392     if (chan->head) {
0393         chan_write(chan, hdp, desc_phys(pool, chan->head));
0394         if (chan->rxfree)
0395             chan_write(chan, rxfree, chan->count);
0396     }
0397 
0398     spin_unlock_irqrestore(&chan->lock, flags);
0399     return 0;
0400 }
0401 
0402 /* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
0403  * rmask - mask of rate limited channels
0404  * Returns 0 on success, -EINVAL if the rate cannot be fitted.
0405  */
0406 static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
0407                    u32 *rmask, int *prio_mode)
0408 {
0409     struct cpdma_ctlr *ctlr = ch->ctlr;
0410     struct cpdma_chan *chan;
0411     u32 old_rate = ch->rate;
0412     u32 new_rmask = 0;
0413     int rlim = 0;
0414     int i;
0415 
0416     for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
0417         chan = ctlr->channels[i];
0418         if (!chan)
0419             continue;
0420 
0421         if (chan == ch)
0422             chan->rate = rate;
0423 
0424         if (chan->rate) {
0425             rlim = 1;
0426             new_rmask |= chan->mask;
0427             continue;
0428         }
0429 
0430         if (rlim)
0431             goto err;
0432     }
0433 
0434     *rmask = new_rmask;
0435     *prio_mode = rlim;
0436     return 0;
0437 
0438 err:
0439     ch->rate = old_rate;
0440     dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
0441         chan->chan_num);
0442     return -EINVAL;
0443 }
0444 
0445 static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
0446                   struct cpdma_chan *ch)
0447 {
0448     u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
0449     u32 best_send_cnt = 0, best_idle_cnt = 0;
0450     u32 new_rate, best_rate = 0, rate_reg;
0451     u64 send_cnt, idle_cnt;
0452     u32 min_send_cnt, freq;
0453     u64 dividend, divisor;
0454 
0455     if (!ch->rate) {
0456         ch->rate_factor = 0;
0457         goto set_factor;
0458     }
0459 
0460     freq = ctlr->params.bus_freq_mhz * 1000 * 32;
0461     if (!freq) {
0462         dev_err(ctlr->dev, "The bus frequency is not set\n");
0463         return -EINVAL;
0464     }
0465 
0466     min_send_cnt = freq - ch->rate;
0467     send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
0468     while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
0469         dividend = ch->rate * send_cnt;
0470         divisor = min_send_cnt;
0471         idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);
0472 
0473         dividend = freq * idle_cnt;
0474         divisor = idle_cnt + send_cnt;
0475         new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);
0476 
0477         delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
0478         if (delta < best_delta) {
0479             best_delta = delta;
0480             best_send_cnt = send_cnt;
0481             best_idle_cnt = idle_cnt;
0482             best_rate = new_rate;
0483 
0484             if (!delta)
0485                 break;
0486         }
0487 
0488         if (prev_delta >= delta) {
0489             prev_delta = delta;
0490             send_cnt++;
0491             continue;
0492         }
0493 
0494         idle_cnt++;
0495         dividend = freq * idle_cnt;
0496         send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
0497         send_cnt -= idle_cnt;
0498         prev_delta = UINT_MAX;
0499     }
0500 
0501     ch->rate = best_rate;
0502     ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);
0503 
0504 set_factor:
0505     rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
0506     dma_reg_write(ctlr, rate_reg, ch->rate_factor);
0507     return 0;
0508 }
0509 
0510 struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
0511 {
0512     struct cpdma_ctlr *ctlr;
0513 
0514     ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
0515     if (!ctlr)
0516         return NULL;
0517 
0518     ctlr->state = CPDMA_STATE_IDLE;
0519     ctlr->params = *params;
0520     ctlr->dev = params->dev;
0521     ctlr->chan_num = 0;
0522     spin_lock_init(&ctlr->lock);
0523 
0524     if (cpdma_desc_pool_create(ctlr))
0525         return NULL;
0526     /* split pool equally between RX/TX by default */
0527     ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
0528     ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
0529 
0530     if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
0531         ctlr->num_chan = CPDMA_MAX_CHANNELS;
0532     return ctlr;
0533 }
0534 
0535 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
0536 {
0537     struct cpdma_chan *chan;
0538     unsigned long flags;
0539     int i, prio_mode;
0540 
0541     spin_lock_irqsave(&ctlr->lock, flags);
0542     if (ctlr->state != CPDMA_STATE_IDLE) {
0543         spin_unlock_irqrestore(&ctlr->lock, flags);
0544         return -EBUSY;
0545     }
0546 
0547     if (ctlr->params.has_soft_reset) {
0548         unsigned timeout = 10 * 100;
0549 
0550         dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
0551         while (timeout) {
0552             if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
0553                 break;
0554             udelay(10);
0555             timeout--;
0556         }
0557         WARN_ON(!timeout);
0558     }
0559 
0560     for (i = 0; i < ctlr->num_chan; i++) {
0561         writel(0, ctlr->params.txhdp + 4 * i);
0562         writel(0, ctlr->params.rxhdp + 4 * i);
0563         writel(0, ctlr->params.txcp + 4 * i);
0564         writel(0, ctlr->params.rxcp + 4 * i);
0565     }
0566 
0567     dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
0568     dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
0569 
0570     dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
0571     dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
0572 
0573     ctlr->state = CPDMA_STATE_ACTIVE;
0574 
0575     prio_mode = 0;
0576     for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
0577         chan = ctlr->channels[i];
0578         if (chan) {
0579             cpdma_chan_set_chan_shaper(chan);
0580             cpdma_chan_on(chan);
0581 
0582             /* leave fixed prio mode off only if all tx channels are rate limited */
0583             if (is_tx_chan(chan) && !chan->rate)
0584                 prio_mode = 1;
0585         }
0586     }
0587 
0588     _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
0589     _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);
0590 
0591     spin_unlock_irqrestore(&ctlr->lock, flags);
0592     return 0;
0593 }
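
/* Usage sketch (illustrative only, not part of the original driver): a MAC
 * driver such as cpsw typically brings the DMA engine up along these lines.
 * The ss_regs, my_... and MY_... names and the numeric values are all
 * hypothetical; the cpdma_* calls and cpdma_params fields are the ones used
 * in this file:
 *
 *        struct cpdma_params params = {
 *                .dev             = dev,
 *                .dmaregs         = ss_regs + MY_CPDMA_OFS,
 *                .txhdp           = ss_regs + MY_STATERAM_OFS,
 *                .rxhdp           = ss_regs + MY_STATERAM_OFS + 0x20,
 *                .txcp            = ss_regs + MY_STATERAM_OFS + 0x40,
 *                .rxcp            = ss_regs + MY_STATERAM_OFS + 0x60,
 *                .rxfree          = ss_regs + MY_RXFREE_OFS,
 *                .num_chan        = 8,
 *                .has_soft_reset  = true,
 *                .min_packet_size = 64,
 *                .desc_mem_size   = 8192,
 *                .desc_align      = 16,
 *                .has_ext_regs    = true,
 *                .bus_freq_mhz    = 350,
 *        };
 *        struct cpdma_ctlr *dma;
 *        struct cpdma_chan *txch, *rxch;
 *
 *        dma = cpdma_ctlr_create(&params);
 *        if (!dma)
 *                return -ENOMEM;
 *        txch = cpdma_chan_create(dma, 0, my_tx_handler, 0);
 *        rxch = cpdma_chan_create(dma, 0, my_rx_handler, 1);
 *        ... prime rx buffers, request irqs ...
 *        cpdma_ctlr_start(dma);
 */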
0594 
0595 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
0596 {
0597     unsigned long flags;
0598     int i;
0599 
0600     spin_lock_irqsave(&ctlr->lock, flags);
0601     if (ctlr->state != CPDMA_STATE_ACTIVE) {
0602         spin_unlock_irqrestore(&ctlr->lock, flags);
0603         return -EINVAL;
0604     }
0605 
0606     ctlr->state = CPDMA_STATE_TEARDOWN;
0607     spin_unlock_irqrestore(&ctlr->lock, flags);
0608 
0609     for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
0610         if (ctlr->channels[i])
0611             cpdma_chan_stop(ctlr->channels[i]);
0612     }
0613 
0614     spin_lock_irqsave(&ctlr->lock, flags);
0615     dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
0616     dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
0617 
0618     dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
0619     dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
0620 
0621     ctlr->state = CPDMA_STATE_IDLE;
0622 
0623     spin_unlock_irqrestore(&ctlr->lock, flags);
0624     return 0;
0625 }
0626 
0627 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
0628 {
0629     int ret = 0, i;
0630 
0631     if (!ctlr)
0632         return -EINVAL;
0633 
0634     if (ctlr->state != CPDMA_STATE_IDLE)
0635         cpdma_ctlr_stop(ctlr);
0636 
0637     for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
0638         cpdma_chan_destroy(ctlr->channels[i]);
0639 
0640     cpdma_desc_pool_destroy(ctlr);
0641     return ret;
0642 }
0643 
0644 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
0645 {
0646     unsigned long flags;
0647     int i;
0648 
0649     spin_lock_irqsave(&ctlr->lock, flags);
0650     if (ctlr->state != CPDMA_STATE_ACTIVE) {
0651         spin_unlock_irqrestore(&ctlr->lock, flags);
0652         return -EINVAL;
0653     }
0654 
0655     for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
0656         if (ctlr->channels[i])
0657             cpdma_chan_int_ctrl(ctlr->channels[i], enable);
0658     }
0659 
0660     spin_unlock_irqrestore(&ctlr->lock, flags);
0661     return 0;
0662 }
0663 
0664 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
0665 {
0666     dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
0667 }
0668 
0669 u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
0670 {
0671     return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
0672 }
0673 
0674 u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
0675 {
0676     return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
0677 }
0678 
0679 static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
0680                  int rx, int desc_num,
0681                  int per_ch_desc)
0682 {
0683     struct cpdma_chan *chan, *most_chan = NULL;
0684     int desc_cnt = desc_num;
0685     int most_dnum = 0;
0686     int min, max, i;
0687 
0688     if (!desc_num)
0689         return;
0690 
0691     if (rx) {
0692         min = rx_chan_num(0);
0693         max = rx_chan_num(CPDMA_MAX_CHANNELS);
0694     } else {
0695         min = tx_chan_num(0);
0696         max = tx_chan_num(CPDMA_MAX_CHANNELS);
0697     }
0698 
0699     for (i = min; i < max; i++) {
0700         chan = ctlr->channels[i];
0701         if (!chan)
0702             continue;
0703 
0704         if (chan->weight)
0705             chan->desc_num = (chan->weight * desc_num) / 100;
0706         else
0707             chan->desc_num = per_ch_desc;
0708 
0709         desc_cnt -= chan->desc_num;
0710 
0711         if (most_dnum < chan->desc_num) {
0712             most_dnum = chan->desc_num;
0713             most_chan = chan;
0714         }
0715     }
0716     /* hand any remaining descriptors to the channel with the most */
0717     if (most_chan)
0718         most_chan->desc_num += desc_cnt;
0719 }
0720 
0721 /*
0722  * cpdma_chan_split_pool - Splits the controller's descriptor pool
0723  * between all channels. Has to be called under the ctlr lock.
0724  */
0725 static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
0726 {
0727     int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
0728     int free_rx_num = 0, free_tx_num = 0;
0729     int rx_weight = 0, tx_weight = 0;
0730     int tx_desc_num, rx_desc_num;
0731     struct cpdma_chan *chan;
0732     int i;
0733 
0734     if (!ctlr->chan_num)
0735         return 0;
0736 
0737     for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
0738         chan = ctlr->channels[i];
0739         if (!chan)
0740             continue;
0741 
0742         if (is_rx_chan(chan)) {
0743             if (!chan->weight)
0744                 free_rx_num++;
0745             rx_weight += chan->weight;
0746         } else {
0747             if (!chan->weight)
0748                 free_tx_num++;
0749             tx_weight += chan->weight;
0750         }
0751     }
0752 
0753     if (rx_weight > 100 || tx_weight > 100)
0754         return -EINVAL;
0755 
0756     tx_desc_num = ctlr->num_tx_desc;
0757     rx_desc_num = ctlr->num_rx_desc;
0758 
0759     if (free_tx_num) {
0760         tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
0761         tx_per_ch_desc /= free_tx_num;
0762     }
0763     if (free_rx_num) {
0764         rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
0765         rx_per_ch_desc /= free_rx_num;
0766     }
0767 
0768     cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
0769     cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);
0770 
0771     return 0;
0772 }
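
/* Worked example (illustrative, not from the original source): assume
 * ctlr->num_tx_desc = 256 and two tx channels, ch0 with weight 75 and
 * ch1 with weight 0. The code above computes
 *
 *        tx_per_ch_desc = (256 - (75 * 256) / 100) / 1 = 64
 *
 * and cpdma_chan_set_descs() then assigns ch0->desc_num = 192 and
 * ch1->desc_num = 64; any rounding remainder is handed to the channel
 * that already holds the most descriptors.
 */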
0773 
0774 
0775 /* cpdma_chan_set_weight - set weight of a channel in percentage.
0776  * Tx and Rx channels have separate weight pools: 100% for Rx and
0777  * 100% for Tx. The weight is used to split cpdma resources, such as
0778  * the number of descriptors, between the channels in the required
0779  * proportion. The channel rate alone is not enough to derive the
0780  * weight of a channel, as the maximum rate of the interface is also
0781  * needed. If weight == 0, the channel uses whatever descriptors are
0782  * left over by the weighted channels.
0783  */
0784 int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
0785 {
0786     struct cpdma_ctlr *ctlr = ch->ctlr;
0787     unsigned long flags, ch_flags;
0788     int ret;
0789 
0790     spin_lock_irqsave(&ctlr->lock, flags);
0791     spin_lock_irqsave(&ch->lock, ch_flags);
0792     if (ch->weight == weight) {
0793         spin_unlock_irqrestore(&ch->lock, ch_flags);
0794         spin_unlock_irqrestore(&ctlr->lock, flags);
0795         return 0;
0796     }
0797     ch->weight = weight;
0798     spin_unlock_irqrestore(&ch->lock, ch_flags);
0799 
0800     /* re-split pool using new channel weight */
0801     ret = cpdma_chan_split_pool(ctlr);
0802     spin_unlock_irqrestore(&ctlr->lock, flags);
0803     return ret;
0804 }
0805 
0806 /* cpdma_chan_get_min_rate - get minimum allowed rate for channel
0807  * Should be called before cpdma_chan_set_rate.
0808  * Returns min rate in Kb/s
0809  */
0810 u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
0811 {
0812     unsigned int dividend, divisor;
0813 
0814     dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
0815     divisor = 1 + CPDMA_MAX_RLIM_CNT;
0816 
0817     return DIV_ROUND_UP(dividend, divisor);
0818 }
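
/* Worked example (illustrative, with a hypothetical bus_freq_mhz of 350):
 *
 *        DIV_ROUND_UP(350 * 32 * 1000, 1 + 16384)
 *                = DIV_ROUND_UP(11200000, 16385) = 684
 *
 * so on such a bus the shaper cannot be programmed below roughly
 * 684 Kb/s.
 */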
0819 
0820 /* cpdma_chan_set_rate - limits bandwidth for a transmit channel.
0821  * The bandwidth-limited channels have to be in order, beginning from lowest.
0822  * ch - transmit channel the bandwidth is configured for
0823  * rate - bandwidth in Kb/s; if 0, the shaper is turned off
0824  */
0825 int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
0826 {
0827     unsigned long flags, ch_flags;
0828     struct cpdma_ctlr *ctlr;
0829     int ret, prio_mode;
0830     u32 rmask;
0831 
0832     if (!ch || !is_tx_chan(ch))
0833         return -EINVAL;
0834 
0835     if (ch->rate == rate)
0836         return rate;
0837 
0838     ctlr = ch->ctlr;
0839     spin_lock_irqsave(&ctlr->lock, flags);
0840     spin_lock_irqsave(&ch->lock, ch_flags);
0841 
0842     ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
0843     if (ret)
0844         goto err;
0845 
0846     ret = cpdma_chan_set_factors(ctlr, ch);
0847     if (ret)
0848         goto err;
0849 
0850     spin_unlock_irqrestore(&ch->lock, ch_flags);
0851 
0852     /* on shapers */
0853     _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
0854     _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
0855     spin_unlock_irqrestore(&ctlr->lock, flags);
0856     return ret;
0857 
0858 err:
0859     spin_unlock_irqrestore(&ch->lock, ch_flags);
0860     spin_unlock_irqrestore(&ctlr->lock, flags);
0861     return ret;
0862 }
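
/* Usage sketch (illustrative only): rate == 0 turns the shaper off again,
 * and cpdma_chan_fit_rate() above rejects configurations in which a
 * rate-limited tx channel is followed by one that is not rate limited.
 * The txch0 name is hypothetical:
 *
 *        ret = cpdma_chan_set_rate(txch0, 10000);        shape to 10 Mb/s
 *        if (ret < 0)
 *                return ret;
 *        ...
 *        cpdma_chan_set_rate(txch0, 0);                  shaper off
 */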
0863 
0864 u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
0865 {
0866     unsigned long flags;
0867     u32 rate;
0868 
0869     spin_lock_irqsave(&ch->lock, flags);
0870     rate = ch->rate;
0871     spin_unlock_irqrestore(&ch->lock, flags);
0872 
0873     return rate;
0874 }
0875 
0876 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
0877                      cpdma_handler_fn handler, int rx_type)
0878 {
0879     int offset = chan_num * 4;
0880     struct cpdma_chan *chan;
0881     unsigned long flags;
0882 
0883     chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
0884 
0885     if (__chan_linear(chan_num) >= ctlr->num_chan)
0886         return ERR_PTR(-EINVAL);
0887 
0888     chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
0889     if (!chan)
0890         return ERR_PTR(-ENOMEM);
0891 
0892     spin_lock_irqsave(&ctlr->lock, flags);
0893     if (ctlr->channels[chan_num]) {
0894         spin_unlock_irqrestore(&ctlr->lock, flags);
0895         devm_kfree(ctlr->dev, chan);
0896         return ERR_PTR(-EBUSY);
0897     }
0898 
0899     chan->ctlr  = ctlr;
0900     chan->state = CPDMA_STATE_IDLE;
0901     chan->chan_num  = chan_num;
0902     chan->handler   = handler;
0903     chan->rate  = 0;
0904     chan->weight    = 0;
0905 
0906     if (is_rx_chan(chan)) {
0907         chan->hdp   = ctlr->params.rxhdp + offset;
0908         chan->cp    = ctlr->params.rxcp + offset;
0909         chan->rxfree    = ctlr->params.rxfree + offset;
0910         chan->int_set   = CPDMA_RXINTMASKSET;
0911         chan->int_clear = CPDMA_RXINTMASKCLEAR;
0912         chan->td    = CPDMA_RXTEARDOWN;
0913         chan->dir   = DMA_FROM_DEVICE;
0914     } else {
0915         chan->hdp   = ctlr->params.txhdp + offset;
0916         chan->cp    = ctlr->params.txcp + offset;
0917         chan->int_set   = CPDMA_TXINTMASKSET;
0918         chan->int_clear = CPDMA_TXINTMASKCLEAR;
0919         chan->td    = CPDMA_TXTEARDOWN;
0920         chan->dir   = DMA_TO_DEVICE;
0921     }
0922     chan->mask = BIT(chan_linear(chan));
0923 
0924     spin_lock_init(&chan->lock);
0925 
0926     ctlr->channels[chan_num] = chan;
0927     ctlr->chan_num++;
0928 
0929     cpdma_chan_split_pool(ctlr);
0930 
0931     spin_unlock_irqrestore(&ctlr->lock, flags);
0932     return chan;
0933 }
0934 
0935 int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
0936 {
0937     unsigned long flags;
0938     int desc_num;
0939 
0940     spin_lock_irqsave(&chan->lock, flags);
0941     desc_num = chan->desc_num;
0942     spin_unlock_irqrestore(&chan->lock, flags);
0943 
0944     return desc_num;
0945 }
0946 
0947 int cpdma_chan_destroy(struct cpdma_chan *chan)
0948 {
0949     struct cpdma_ctlr *ctlr;
0950     unsigned long flags;
0951 
0952     if (!chan)
0953         return -EINVAL;
0954     ctlr = chan->ctlr;
0955 
0956     spin_lock_irqsave(&ctlr->lock, flags);
0957     if (chan->state != CPDMA_STATE_IDLE)
0958         cpdma_chan_stop(chan);
0959     ctlr->channels[chan->chan_num] = NULL;
0960     ctlr->chan_num--;
0961     devm_kfree(ctlr->dev, chan);
0962     cpdma_chan_split_pool(ctlr);
0963 
0964     spin_unlock_irqrestore(&ctlr->lock, flags);
0965     return 0;
0966 }
0967 
0968 int cpdma_chan_get_stats(struct cpdma_chan *chan,
0969              struct cpdma_chan_stats *stats)
0970 {
0971     unsigned long flags;
0972     if (!chan)
0973         return -EINVAL;
0974     spin_lock_irqsave(&chan->lock, flags);
0975     memcpy(stats, &chan->stats, sizeof(*stats));
0976     spin_unlock_irqrestore(&chan->lock, flags);
0977     return 0;
0978 }
0979 
0980 static void __cpdma_chan_submit(struct cpdma_chan *chan,
0981                 struct cpdma_desc __iomem *desc)
0982 {
0983     struct cpdma_ctlr       *ctlr = chan->ctlr;
0984     struct cpdma_desc __iomem   *prev = chan->tail;
0985     struct cpdma_desc_pool      *pool = ctlr->pool;
0986     dma_addr_t          desc_dma;
0987     u32             mode;
0988 
0989     desc_dma = desc_phys(pool, desc);
0990 
0991     /* simple case - idle channel */
0992     if (!chan->head) {
0993         chan->stats.head_enqueue++;
0994         chan->head = desc;
0995         chan->tail = desc;
0996         if (chan->state == CPDMA_STATE_ACTIVE)
0997             chan_write(chan, hdp, desc_dma);
0998         return;
0999     }
1000 
1001     /* first chain the descriptor at the tail of the list */
1002     desc_write(prev, hw_next, desc_dma);
1003     chan->tail = desc;
1004     chan->stats.tail_enqueue++;
1005 
1006     /* next check if EOQ has been triggered already */
1007     mode = desc_read(prev, hw_mode);
1008     if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
1009         (chan->state == CPDMA_STATE_ACTIVE)) {
1010         desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
1011         chan_write(chan, hdp, desc_dma);
1012         chan->stats.misqueued++;
1013     }
1014 }
1015 
1016 static int cpdma_chan_submit_si(struct submit_info *si)
1017 {
1018     struct cpdma_chan       *chan = si->chan;
1019     struct cpdma_ctlr       *ctlr = chan->ctlr;
1020     int             len = si->len;
1021     struct cpdma_desc __iomem   *desc;
1022     dma_addr_t          buffer;
1023     u32             mode;
1024     int             ret;
1025 
1026     if (chan->count >= chan->desc_num)  {
1027         chan->stats.desc_alloc_fail++;
1028         return -ENOMEM;
1029     }
1030 
1031     desc = cpdma_desc_alloc(ctlr->pool);
1032     if (!desc) {
1033         chan->stats.desc_alloc_fail++;
1034         return -ENOMEM;
1035     }
1036 
1037     if (len < ctlr->params.min_packet_size) {
1038         len = ctlr->params.min_packet_size;
1039         chan->stats.runt_transmit_buff++;
1040     }
1041 
1042     mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
1043     cpdma_desc_to_port(chan, mode, si->directed);
1044 
1045     if (si->data_dma) {
1046         buffer = si->data_dma;
1047         dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
1048     } else {
1049         buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
1050         ret = dma_mapping_error(ctlr->dev, buffer);
1051         if (ret) {
1052             cpdma_desc_free(ctlr->pool, desc, 1);
1053             return -EINVAL;
1054         }
1055     }
1056 
1057     /* Relaxed IO accessors can be used here as there is a read barrier
1058      * at the end of the write sequence.
1059      */
1060     writel_relaxed(0, &desc->hw_next);
1061     writel_relaxed(buffer, &desc->hw_buffer);
1062     writel_relaxed(len, &desc->hw_len);
1063     writel_relaxed(mode | len, &desc->hw_mode);
1064     writel_relaxed((uintptr_t)si->token, &desc->sw_token);
1065     writel_relaxed(buffer, &desc->sw_buffer);
1066     writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
1067                &desc->sw_len);
1068     desc_read(desc, sw_len);
1069 
1070     __cpdma_chan_submit(chan, desc);
1071 
1072     if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
1073         chan_write(chan, rxfree, 1);
1074 
1075     chan->count++;
1076     return 0;
1077 }
1078 
1079 int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
1080                int len, int directed)
1081 {
1082     struct submit_info si;
1083     unsigned long flags;
1084     int ret;
1085 
1086     si.chan = chan;
1087     si.token = token;
1088     si.data_virt = data;
1089     si.data_dma = 0;
1090     si.len = len;
1091     si.directed = directed;
1092 
1093     spin_lock_irqsave(&chan->lock, flags);
1094     if (chan->state == CPDMA_STATE_TEARDOWN) {
1095         spin_unlock_irqrestore(&chan->lock, flags);
1096         return -EINVAL;
1097     }
1098 
1099     ret = cpdma_chan_submit_si(&si);
1100     spin_unlock_irqrestore(&chan->lock, flags);
1101     return ret;
1102 }
1103 
1104 int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
1105                   dma_addr_t data, int len, int directed)
1106 {
1107     struct submit_info si;
1108     unsigned long flags;
1109     int ret;
1110 
1111     si.chan = chan;
1112     si.token = token;
1113     si.data_virt = NULL;
1114     si.data_dma = data;
1115     si.len = len;
1116     si.directed = directed;
1117 
1118     spin_lock_irqsave(&chan->lock, flags);
1119     if (chan->state == CPDMA_STATE_TEARDOWN) {
1120         spin_unlock_irqrestore(&chan->lock, flags);
1121         return -EINVAL;
1122     }
1123 
1124     ret = cpdma_chan_submit_si(&si);
1125     spin_unlock_irqrestore(&chan->lock, flags);
1126     return ret;
1127 }
1128 
1129 int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
1130               int len, int directed)
1131 {
1132     struct submit_info si;
1133     unsigned long flags;
1134     int ret;
1135 
1136     si.chan = chan;
1137     si.token = token;
1138     si.data_virt = data;
1139     si.data_dma = 0;
1140     si.len = len;
1141     si.directed = directed;
1142 
1143     spin_lock_irqsave(&chan->lock, flags);
1144     if (chan->state != CPDMA_STATE_ACTIVE) {
1145         spin_unlock_irqrestore(&chan->lock, flags);
1146         return -EINVAL;
1147     }
1148 
1149     ret = cpdma_chan_submit_si(&si);
1150     spin_unlock_irqrestore(&chan->lock, flags);
1151     return ret;
1152 }
1153 
1154 int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
1155                  dma_addr_t data, int len, int directed)
1156 {
1157     struct submit_info si;
1158     unsigned long flags;
1159     int ret;
1160 
1161     si.chan = chan;
1162     si.token = token;
1163     si.data_virt = NULL;
1164     si.data_dma = data;
1165     si.len = len;
1166     si.directed = directed;
1167 
1168     spin_lock_irqsave(&chan->lock, flags);
1169     if (chan->state != CPDMA_STATE_ACTIVE) {
1170         spin_unlock_irqrestore(&chan->lock, flags);
1171         return -EINVAL;
1172     }
1173 
1174     ret = cpdma_chan_submit_si(&si);
1175     spin_unlock_irqrestore(&chan->lock, flags);
1176     return ret;
1177 }
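
/* Usage sketch (illustrative only, not from the original driver): receive
 * buffers are typically primed with cpdma_chan_idle_submit() before
 * cpdma_ctlr_start(), and transmit packets are queued from the xmit path
 * with cpdma_chan_submit(). The skb/rx_buf_len names are hypothetical:
 *
 *        ret = cpdma_chan_idle_submit(rxch, new_skb, new_skb->data,
 *                                     rx_buf_len, 0);
 *        ...
 *        ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *
 * The token (here the skb) is handed back to the channel's
 * cpdma_handler_fn when the descriptor completes or the channel is
 * torn down.
 */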
1178 
1179 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
1180 {
1181     struct cpdma_ctlr   *ctlr = chan->ctlr;
1182     struct cpdma_desc_pool  *pool = ctlr->pool;
1183     bool            free_tx_desc;
1184     unsigned long       flags;
1185 
1186     spin_lock_irqsave(&chan->lock, flags);
1187     free_tx_desc = (chan->count < chan->desc_num) &&
1188              gen_pool_avail(pool->gen_pool);
1189     spin_unlock_irqrestore(&chan->lock, flags);
1190     return free_tx_desc;
1191 }
1192 
1193 static void __cpdma_chan_free(struct cpdma_chan *chan,
1194                   struct cpdma_desc __iomem *desc,
1195                   int outlen, int status)
1196 {
1197     struct cpdma_ctlr       *ctlr = chan->ctlr;
1198     struct cpdma_desc_pool      *pool = ctlr->pool;
1199     dma_addr_t          buff_dma;
1200     int             origlen;
1201     uintptr_t           token;
1202 
1203     token      = desc_read(desc, sw_token);
1204     origlen    = desc_read(desc, sw_len);
1205 
1206     buff_dma   = desc_read(desc, sw_buffer);
1207     if (origlen & CPDMA_DMA_EXT_MAP) {
1208         origlen &= ~CPDMA_DMA_EXT_MAP;
1209         dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
1210                     chan->dir);
1211     } else {
1212         dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
1213     }
1214 
1215     cpdma_desc_free(pool, desc, 1);
1216     (*chan->handler)((void *)token, outlen, status);
1217 }
1218 
1219 static int __cpdma_chan_process(struct cpdma_chan *chan)
1220 {
1221     struct cpdma_ctlr       *ctlr = chan->ctlr;
1222     struct cpdma_desc __iomem   *desc;
1223     int             status, outlen;
1224     int             cb_status = 0;
1225     struct cpdma_desc_pool      *pool = ctlr->pool;
1226     dma_addr_t          desc_dma;
1227     unsigned long           flags;
1228 
1229     spin_lock_irqsave(&chan->lock, flags);
1230 
1231     desc = chan->head;
1232     if (!desc) {
1233         chan->stats.empty_dequeue++;
1234         status = -ENOENT;
1235         goto unlock_ret;
1236     }
1237     desc_dma = desc_phys(pool, desc);
1238 
1239     status  = desc_read(desc, hw_mode);
1240     outlen  = status & 0x7ff;
1241     if (status & CPDMA_DESC_OWNER) {
1242         chan->stats.busy_dequeue++;
1243         status = -EBUSY;
1244         goto unlock_ret;
1245     }
1246 
1247     if (status & CPDMA_DESC_PASS_CRC)
1248         outlen -= CPDMA_DESC_CRC_LEN;
1249 
1250     status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
1251                 CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);
1252 
1253     chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
1254     chan_write(chan, cp, desc_dma);
1255     chan->count--;
1256     chan->stats.good_dequeue++;
1257 
1258     if ((status & CPDMA_DESC_EOQ) && chan->head) {
1259         chan->stats.requeue++;
1260         chan_write(chan, hdp, desc_phys(pool, chan->head));
1261     }
1262 
1263     spin_unlock_irqrestore(&chan->lock, flags);
1264     if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
1265         cb_status = -ENOSYS;
1266     else
1267         cb_status = status;
1268 
1269     __cpdma_chan_free(chan, desc, outlen, cb_status);
1270     return status;
1271 
1272 unlock_ret:
1273     spin_unlock_irqrestore(&chan->lock, flags);
1274     return status;
1275 }
1276 
1277 int cpdma_chan_process(struct cpdma_chan *chan, int quota)
1278 {
1279     int used = 0, ret = 0;
1280 
1281     if (chan->state != CPDMA_STATE_ACTIVE)
1282         return -EINVAL;
1283 
1284     while (used < quota) {
1285         ret = __cpdma_chan_process(chan);
1286         if (ret < 0)
1287             break;
1288         used++;
1289     }
1290     return used;
1291 }
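
/* Usage sketch (illustrative only): a NAPI poll handler built on this layer
 * usually drains completions with cpdma_chan_process() and then signals
 * end-of-interrupt. The napi/budget/dma names are hypothetical and the
 * CPDMA_EOI_RX value is assumed to come from davinci_cpdma.h:
 *
 *        num_rx = cpdma_chan_process(rxch, budget);
 *        if (num_rx < budget) {
 *                napi_complete(napi);
 *                cpdma_ctlr_eoi(dma, CPDMA_EOI_RX);
 *        }
 *
 * Each processed descriptor results in one callback to the channel's
 * handler with the completed length and status.
 */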
1292 
1293 int cpdma_chan_start(struct cpdma_chan *chan)
1294 {
1295     struct cpdma_ctlr *ctlr = chan->ctlr;
1296     unsigned long flags;
1297     int ret;
1298 
1299     spin_lock_irqsave(&ctlr->lock, flags);
1300     ret = cpdma_chan_set_chan_shaper(chan);
1301     spin_unlock_irqrestore(&ctlr->lock, flags);
1302     if (ret)
1303         return ret;
1304 
1305     ret = cpdma_chan_on(chan);
1306     if (ret)
1307         return ret;
1308 
1309     return 0;
1310 }
1311 
1312 int cpdma_chan_stop(struct cpdma_chan *chan)
1313 {
1314     struct cpdma_ctlr   *ctlr = chan->ctlr;
1315     struct cpdma_desc_pool  *pool = ctlr->pool;
1316     unsigned long       flags;
1317     int         ret;
1318     unsigned        timeout;
1319 
1320     spin_lock_irqsave(&chan->lock, flags);
1321     if (chan->state == CPDMA_STATE_TEARDOWN) {
1322         spin_unlock_irqrestore(&chan->lock, flags);
1323         return -EINVAL;
1324     }
1325 
1326     chan->state = CPDMA_STATE_TEARDOWN;
1327     dma_reg_write(ctlr, chan->int_clear, chan->mask);
1328 
1329     /* trigger teardown */
1330     dma_reg_write(ctlr, chan->td, chan_linear(chan));
1331 
1332     /* wait for teardown complete */
1333     timeout = 100 * 100; /* 100 ms */
1334     while (timeout) {
1335         u32 cp = chan_read(chan, cp);
1336         if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
1337             break;
1338         udelay(10);
1339         timeout--;
1340     }
1341     WARN_ON(!timeout);
1342     chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
1343 
1344     /* handle completed packets */
1345     spin_unlock_irqrestore(&chan->lock, flags);
1346     do {
1347         ret = __cpdma_chan_process(chan);
1348         if (ret < 0)
1349             break;
1350     } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
1351     spin_lock_irqsave(&chan->lock, flags);
1352 
1353     /* remaining packets haven't been tx/rx'ed, clean them up */
1354     while (chan->head) {
1355         struct cpdma_desc __iomem *desc = chan->head;
1356         dma_addr_t next_dma;
1357 
1358         next_dma = desc_read(desc, hw_next);
1359         chan->head = desc_from_phys(pool, next_dma);
1360         chan->count--;
1361         chan->stats.teardown_dequeue++;
1362 
1363         /* issue callback without locks held */
1364         spin_unlock_irqrestore(&chan->lock, flags);
1365         __cpdma_chan_free(chan, desc, 0, -ENOSYS);
1366         spin_lock_irqsave(&chan->lock, flags);
1367     }
1368 
1369     chan->state = CPDMA_STATE_IDLE;
1370     spin_unlock_irqrestore(&chan->lock, flags);
1371     return 0;
1372 }
1373 
1374 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
1375 {
1376     unsigned long flags;
1377 
1378     spin_lock_irqsave(&chan->lock, flags);
1379     if (chan->state != CPDMA_STATE_ACTIVE) {
1380         spin_unlock_irqrestore(&chan->lock, flags);
1381         return -EINVAL;
1382     }
1383 
1384     dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
1385               chan->mask);
1386     spin_unlock_irqrestore(&chan->lock, flags);
1387 
1388     return 0;
1389 }
1390 
1391 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
1392 {
1393     unsigned long flags;
1394     int ret;
1395 
1396     spin_lock_irqsave(&ctlr->lock, flags);
1397     ret = _cpdma_control_get(ctlr, control);
1398     spin_unlock_irqrestore(&ctlr->lock, flags);
1399 
1400     return ret;
1401 }
1402 
1403 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
1404 {
1405     unsigned long flags;
1406     int ret;
1407 
1408     spin_lock_irqsave(&ctlr->lock, flags);
1409     ret = _cpdma_control_set(ctlr, control, value);
1410     spin_unlock_irqrestore(&ctlr->lock, flags);
1411 
1412     return ret;
1413 }
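
/* Usage sketch (illustrative only): the control accessors operate on the
 * fields described in the controls[] table near the top of this file,
 * e.g. programming a 2-byte receive buffer offset (a hypothetical value
 * chosen for IP header alignment):
 *
 *        ret  = cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2);
 *        idle = cpdma_control_get(ctlr, CPDMA_STAT_IDLE);
 *
 * Both calls fail with -ENOTSUPP unless the controller was created with
 * has_ext_regs set, and with -EINVAL unless it is currently active.
 */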
1414 
1415 int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
1416 {
1417     return ctlr->num_rx_desc;
1418 }
1419 
1420 int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
1421 {
1422     return ctlr->num_tx_desc;
1423 }
1424 
1425 int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
1426 {
1427     unsigned long flags;
1428     int temp, ret;
1429 
1430     spin_lock_irqsave(&ctlr->lock, flags);
1431 
1432     temp = ctlr->num_rx_desc;
1433     ctlr->num_rx_desc = num_rx_desc;
1434     ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
1435     ret = cpdma_chan_split_pool(ctlr);
1436     if (ret) {
1437         ctlr->num_rx_desc = temp;
1438         ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
1439     }
1440 
1441     spin_unlock_irqrestore(&ctlr->lock, flags);
1442 
1443     return ret;
1444 }