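/*
 * Driver for the Atmel Extensible DMA Controller (XDMAC), as found on
 * SAMA5D4 and SAMA7G5 family SoCs.
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 */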
0010 #include <asm/barrier.h>
0011 #include <dt-bindings/dma/at91.h>
0012 #include <linux/clk.h>
0013 #include <linux/dmaengine.h>
0014 #include <linux/dmapool.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/irq.h>
0017 #include <linux/kernel.h>
0018 #include <linux/list.h>
0019 #include <linux/module.h>
0020 #include <linux/of_dma.h>
0021 #include <linux/of_platform.h>
0022 #include <linux/platform_device.h>
0023 #include <linux/pm.h>
0024
0025 #include "dmaengine.h"
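/* Global registers */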
0028 #define AT_XDMAC_GTYPE 0x00
0029 #define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1)
0030 #define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF)
0031 #define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1)
0032 #define AT_XDMAC_GCFG 0x04
0033 #define AT_XDMAC_WRHP(i) (((i) & 0xF) << 4)
0034 #define AT_XDMAC_WRMP(i) (((i) & 0xF) << 8)
0035 #define AT_XDMAC_WRLP(i) (((i) & 0xF) << 12)
0036 #define AT_XDMAC_RDHP(i) (((i) & 0xF) << 16)
0037 #define AT_XDMAC_RDMP(i) (((i) & 0xF) << 20)
0038 #define AT_XDMAC_RDLP(i) (((i) & 0xF) << 24)
0039 #define AT_XDMAC_RDSG(i) (((i) & 0xF) << 28)
0040 #define AT_XDMAC_GCFG_M2M (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
0041 #define AT_XDMAC_GCFG_P2M (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
0042 AT_XDMAC_WRHP(0x5))
0043 #define AT_XDMAC_GWAC 0x08
0044 #define AT_XDMAC_PW0(i) (((i) & 0xF) << 0)
0045 #define AT_XDMAC_PW1(i) (((i) & 0xF) << 4)
0046 #define AT_XDMAC_PW2(i) (((i) & 0xF) << 8)
0047 #define AT_XDMAC_PW3(i) (((i) & 0xF) << 12)
0048 #define AT_XDMAC_GWAC_M2M 0
0049 #define AT_XDMAC_GWAC_P2M (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
0050
0051 #define AT_XDMAC_GIE 0x0C
0052 #define AT_XDMAC_GID 0x10
0053 #define AT_XDMAC_GIM 0x14
0054 #define AT_XDMAC_GIS 0x18
0055 #define AT_XDMAC_GE 0x1C
0056 #define AT_XDMAC_GD 0x20
0057 #define AT_XDMAC_GS 0x24
0058 #define AT_XDMAC_VERSION 0xFFC
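/* Channel registers */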
0061 #define AT_XDMAC_CIE 0x00
0062 #define AT_XDMAC_CIE_BIE BIT(0)
0063 #define AT_XDMAC_CIE_LIE BIT(1)
0064 #define AT_XDMAC_CIE_DIE BIT(2)
0065 #define AT_XDMAC_CIE_FIE BIT(3)
0066 #define AT_XDMAC_CIE_RBEIE BIT(4)
0067 #define AT_XDMAC_CIE_WBEIE BIT(5)
0068 #define AT_XDMAC_CIE_ROIE BIT(6)
0069 #define AT_XDMAC_CID 0x04
0070 #define AT_XDMAC_CID_BID BIT(0)
0071 #define AT_XDMAC_CID_LID BIT(1)
0072 #define AT_XDMAC_CID_DID BIT(2)
0073 #define AT_XDMAC_CID_FID BIT(3)
0074 #define AT_XDMAC_CID_RBEID BIT(4)
0075 #define AT_XDMAC_CID_WBEID BIT(5)
0076 #define AT_XDMAC_CID_ROID BIT(6)
0077 #define AT_XDMAC_CIM 0x08
0078 #define AT_XDMAC_CIM_BIM BIT(0)
0079 #define AT_XDMAC_CIM_LIM BIT(1)
0080 #define AT_XDMAC_CIM_DIM BIT(2)
0081 #define AT_XDMAC_CIM_FIM BIT(3)
0082 #define AT_XDMAC_CIM_RBEIM BIT(4)
0083 #define AT_XDMAC_CIM_WBEIM BIT(5)
0084 #define AT_XDMAC_CIM_ROIM BIT(6)
0085 #define AT_XDMAC_CIS 0x0C
0086 #define AT_XDMAC_CIS_BIS BIT(0)
0087 #define AT_XDMAC_CIS_LIS BIT(1)
0088 #define AT_XDMAC_CIS_DIS BIT(2)
0089 #define AT_XDMAC_CIS_FIS BIT(3)
0090 #define AT_XDMAC_CIS_RBEIS BIT(4)
0091 #define AT_XDMAC_CIS_WBEIS BIT(5)
0092 #define AT_XDMAC_CIS_ROIS BIT(6)
0093 #define AT_XDMAC_CSA 0x10
0094 #define AT_XDMAC_CDA 0x14
0095 #define AT_XDMAC_CNDA 0x18
0096 #define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1)
0097 #define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc)
0098 #define AT_XDMAC_CNDC 0x1C
0099 #define AT_XDMAC_CNDC_NDE (0x1 << 0)
0100 #define AT_XDMAC_CNDC_NDSUP (0x1 << 1)
0101 #define AT_XDMAC_CNDC_NDDUP (0x1 << 2)
0102 #define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27)
0103 #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3)
0104 #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3)
0105 #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3)
0106 #define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3)
0107 #define AT_XDMAC_CUBC 0x20
0108 #define AT_XDMAC_CBC 0x24
0109 #define AT_XDMAC_CC 0x28
0110 #define AT_XDMAC_CC_TYPE (0x1 << 0)
0111 #define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0)
0112 #define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0)
0113 #define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
0114 #define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
0115 #define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
0116 #define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
0117 #define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
0118 #define AT_XDMAC_CC_DSYNC (0x1 << 4)
0119 #define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
0120 #define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
0121 #define AT_XDMAC_CC_PROT (0x1 << 5)
0122 #define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
0123 #define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
0124 #define AT_XDMAC_CC_SWREQ (0x1 << 6)
0125 #define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
0126 #define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
0127 #define AT_XDMAC_CC_MEMSET (0x1 << 7)
0128 #define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
0129 #define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
0130 #define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8)
0131 #define AT_XDMAC_CC_DWIDTH_OFFSET 11
0132 #define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
0133 #define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)
0134 #define AT_XDMAC_CC_DWIDTH_BYTE 0x0
0135 #define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
0136 #define AT_XDMAC_CC_DWIDTH_WORD 0x2
0137 #define AT_XDMAC_CC_DWIDTH_DWORD 0x3
0138 #define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13)
0139 #define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14)
0140 #define AT_XDMAC_CC_SAM_MASK (0x3 << 16)
0141 #define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
0142 #define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
0143 #define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
0144 #define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
0145 #define AT_XDMAC_CC_DAM_MASK (0x3 << 18)
0146 #define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
0147 #define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
0148 #define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
0149 #define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
0150 #define AT_XDMAC_CC_INITD (0x1 << 21)
0151 #define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
0152 #define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
0153 #define AT_XDMAC_CC_RDIP (0x1 << 22)
0154 #define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
0155 #define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
0156 #define AT_XDMAC_CC_WRIP (0x1 << 23)
0157 #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
0158 #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
0159 #define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24)
0160 #define AT_XDMAC_CDS_MSP 0x2C
0161 #define AT_XDMAC_CSUS 0x30
0162 #define AT_XDMAC_CDUS 0x34
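/* Microblock control members (used in the descriptor mbr_ubc field) */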
0165 #define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL
0166 #define AT_XDMAC_MBR_UBC_NDE (0x1 << 24)
0167 #define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25)
0168 #define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26)
0169 #define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27)
0170 #define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27)
0171 #define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27)
0172 #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27)
0173
0174 #define AT_XDMAC_MAX_CHAN 0x20
0175 #define AT_XDMAC_MAX_CSIZE 16
0176 #define AT_XDMAC_MAX_DWIDTH 8
0177 #define AT_XDMAC_RESIDUE_MAX_RETRIES 5
0178
0179 #define AT_XDMAC_DMA_BUSWIDTHS\
0180 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
0181 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
0182 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
0183 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
0184 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
0185
0186 enum atc_status {
0187 AT_XDMAC_CHAN_IS_CYCLIC = 0,
0188 AT_XDMAC_CHAN_IS_PAUSED,
0189 };
0190
struct at_xdmac_layout {
	/* Global Channel Read Suspend Register */
	u8				grs;
	/* Global Write Suspend Register */
	u8				gws;
	/* Global Channel Read Write Suspend Register */
	u8				grws;
	/* Global Channel Read Write Resume Register */
	u8				grwr;
	/* Global Channel Software Request Register */
	u8				gswr;
	/* Global channel Software Request Status Register */
	u8				gsws;
	/* Global Channel Software Flush Request Register */
	u8				gswf;
	/* Channel reg base */
	u8				chan_cc_reg_base;
	/* Source/Destination Interface identifiers are used */
	bool				sdif;
	/* AXI queue priority configuration supported */
	bool				axi_config;
};
0213
0214
0215 struct at_xdmac_chan {
0216 struct dma_chan chan;
0217 void __iomem *ch_regs;
0218 u32 mask;
0219 u32 cfg;
0220 u8 perid;
0221 u8 perif;
0222 u8 memif;
0223 u32 save_cc;
0224 u32 save_cim;
0225 u32 save_cnda;
0226 u32 save_cndc;
0227 u32 irq_status;
0228 unsigned long status;
0229 struct tasklet_struct tasklet;
0230 struct dma_slave_config sconfig;
0231
0232 spinlock_t lock;
0233
0234 struct list_head xfers_list;
0235 struct list_head free_descs_list;
0236 };
0237
0238
0239
0240 struct at_xdmac {
0241 struct dma_device dma;
0242 void __iomem *regs;
0243 int irq;
0244 struct clk *clk;
0245 u32 save_gim;
0246 struct dma_pool *at_xdmac_desc_pool;
0247 const struct at_xdmac_layout *layout;
0248 struct at_xdmac_chan chan[];
0249 };
0250
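/*
 * Linked List Descriptor: in-memory layout of one transfer descriptor as
 * fetched by the controller (maximal, view 3, layout).
 */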
struct at_xdmac_lld {
	u32 mbr_nda;	/* Next Descriptor Member */
	u32 mbr_ubc;	/* Microblock Control Member */
	u32 mbr_sa;	/* Source Address Member */
	u32 mbr_da;	/* Destination Address Member */
	u32 mbr_cfg;	/* Configuration Register */
	u32 mbr_bc;	/* Block Control Register */
	u32 mbr_ds;	/* Data Stride Register */
	u32 mbr_sus;	/* Source Microblock Stride Register */
	u32 mbr_dus;	/* Destination Microblock Stride Register */
};
0266
0267
0268 struct at_xdmac_desc {
0269 struct at_xdmac_lld lld;
0270 enum dma_transfer_direction direction;
0271 struct dma_async_tx_descriptor tx_dma_desc;
0272 struct list_head desc_node;
0273
0274 bool active_xfer;
0275 unsigned int xfer_size;
0276 struct list_head descs_list;
0277 struct list_head xfer_node;
0278 } __aligned(sizeof(u64));
0279
0280 static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
0281 .grs = 0x28,
0282 .gws = 0x2C,
0283 .grws = 0x30,
0284 .grwr = 0x34,
0285 .gswr = 0x38,
0286 .gsws = 0x3C,
0287 .gswf = 0x40,
0288 .chan_cc_reg_base = 0x50,
0289 .sdif = true,
0290 .axi_config = false,
0291 };
0292
0293 static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
0294 .grs = 0x30,
0295 .gws = 0x38,
0296 .grws = 0x40,
0297 .grwr = 0x44,
0298 .gswr = 0x48,
0299 .gsws = 0x4C,
0300 .gswf = 0x50,
0301 .chan_cc_reg_base = 0x60,
0302 .sdif = false,
0303 .axi_config = true,
0304 };
0305
0306 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
0307 {
0308 return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
0309 }
0310
0311 #define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
0312 #define at_xdmac_write(atxdmac, reg, value) \
0313 writel_relaxed((value), (atxdmac)->regs + (reg))
0314
0315 #define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
0316 #define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
0317
0318 static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
0319 {
0320 return container_of(dchan, struct at_xdmac_chan, chan);
0321 }
0322
0323 static struct device *chan2dev(struct dma_chan *chan)
0324 {
0325 return &chan->dev->device;
0326 }
0327
0328 static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
0329 {
0330 return container_of(ddev, struct at_xdmac, dma);
0331 }
0332
0333 static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
0334 {
0335 return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
0336 }
0337
0338 static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
0339 {
0340 return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
0341 }
0342
0343 static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
0344 {
0345 return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
0346 }
0347
0348 static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
0349 {
0350 return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
0351 }
0352
0353 static inline u8 at_xdmac_get_dwidth(u32 cfg)
0354 {
0355 return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
0356 };
0357
0358 static unsigned int init_nr_desc_per_channel = 64;
0359 module_param(init_nr_desc_per_channel, uint, 0644);
0360 MODULE_PARM_DESC(init_nr_desc_per_channel,
0361 "initial descriptors per channel (default: 64)");
0362
0363
0364 static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
0365 {
0366 return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
0367 }
0368
0369 static void at_xdmac_off(struct at_xdmac *atxdmac)
0370 {
0371 at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
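	/* Wait for all channels to be disabled. */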
0374 while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
0375 cpu_relax();
0376
0377 at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
0378 }
0379
0380
0381 static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
0382 struct at_xdmac_desc *first)
0383 {
0384 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
0385 u32 reg;
0386
0387 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
0388
0389
0390 first->active_xfer = true;
0391
0392
0393 reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
0394 if (atxdmac->layout->sdif)
0395 reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
0396
0397 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
0398
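	/*
	 * Pick the descriptor view: cyclic transfers keep the same
	 * configuration and can use view 1, while non-cyclic transfers need
	 * view 2 or 3 so that each descriptor reloads its own configuration.
	 */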
0404 if (at_xdmac_chan_is_cyclic(atchan))
0405 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
0406 else if ((first->lld.mbr_ubc &
0407 AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
0408 reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
0409 else
0410 reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
0411
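	/*
	 * Program the channel configuration register from the first
	 * descriptor before enabling the channel.
	 */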
0417 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
0418
0419 reg |= AT_XDMAC_CNDC_NDDUP
0420 | AT_XDMAC_CNDC_NDSUP
0421 | AT_XDMAC_CNDC_NDE;
0422 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
0423
0424 dev_vdbg(chan2dev(&atchan->chan),
0425 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
0426 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
0427 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
0428 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
0429 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
0430 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
0431 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
0432
0433 at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
0434 reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
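	/*
	 * The Request Overflow Error interrupt is only meaningful for
	 * peripheral-synchronized transfers.
	 */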
0438 if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
0439 reg |= AT_XDMAC_CIE_ROIE;
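	/*
	 * Cyclic transfers have no end of linked list, so take an interrupt
	 * per block (BIE); other transfers use the end-of-list interrupt
	 * (LIE) instead.
	 */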
0445 if (at_xdmac_chan_is_cyclic(atchan))
0446 at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
0447 reg | AT_XDMAC_CIE_BIE);
0448 else
0449 at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
0450 reg | AT_XDMAC_CIE_LIE);
0451 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
0452 dev_vdbg(chan2dev(&atchan->chan),
0453 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
0454 wmb();
0455 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
0456
0457 dev_vdbg(chan2dev(&atchan->chan),
0458 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
0459 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
0460 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
0461 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
0462 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
0463 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
0464 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
0465
0466 }
0467
0468 static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
0469 {
0470 struct at_xdmac_desc *desc = txd_to_at_desc(tx);
0471 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
0472 dma_cookie_t cookie;
0473 unsigned long irqflags;
0474
0475 spin_lock_irqsave(&atchan->lock, irqflags);
0476 cookie = dma_cookie_assign(tx);
0477
0478 list_add_tail(&desc->xfer_node, &atchan->xfers_list);
0479 spin_unlock_irqrestore(&atchan->lock, irqflags);
0480
0481 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
0482 __func__, atchan, desc);
0483
0484 return cookie;
0485 }
0486
0487 static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
0488 gfp_t gfp_flags)
0489 {
0490 struct at_xdmac_desc *desc;
0491 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
0492 dma_addr_t phys;
0493
0494 desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
0495 if (desc) {
0496 INIT_LIST_HEAD(&desc->descs_list);
0497 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
0498 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
0499 desc->tx_dma_desc.phys = phys;
0500 }
0501
0502 return desc;
0503 }
0504
0505 static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
0506 {
0507 memset(&desc->lld, 0, sizeof(desc->lld));
0508 INIT_LIST_HEAD(&desc->descs_list);
0509 desc->direction = DMA_TRANS_NONE;
0510 desc->xfer_size = 0;
0511 desc->active_xfer = false;
0512 }
0513
0514
0515 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
0516 {
0517 struct at_xdmac_desc *desc;
0518
0519 if (list_empty(&atchan->free_descs_list)) {
0520 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
0521 } else {
0522 desc = list_first_entry(&atchan->free_descs_list,
0523 struct at_xdmac_desc, desc_node);
0524 list_del(&desc->desc_node);
0525 at_xdmac_init_used_desc(desc);
0526 }
0527
0528 return desc;
0529 }
0530
0531 static void at_xdmac_queue_desc(struct dma_chan *chan,
0532 struct at_xdmac_desc *prev,
0533 struct at_xdmac_desc *desc)
0534 {
0535 if (!prev || !desc)
0536 return;
0537
0538 prev->lld.mbr_nda = desc->tx_dma_desc.phys;
0539 prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;
0540
0541 dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
0542 __func__, prev, &prev->lld.mbr_nda);
0543 }
0544
0545 static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
0546 struct at_xdmac_desc *desc)
0547 {
0548 if (!desc)
0549 return;
0550
0551 desc->lld.mbr_bc++;
0552
0553 dev_dbg(chan2dev(chan),
0554 "%s: incrementing the block count of the desc 0x%p\n",
0555 __func__, desc);
0556 }
0557
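/*
 * OF translation: consumers describe a channel with a single cell that packs
 * the memory interface, the peripheral interface and the peripheral ID.  A
 * hypothetical consumer node, assuming the macros from
 * dt-bindings/dma/at91.h (values purely illustrative), could look like:
 *
 *	dmas = <&xdmac0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
 *			 | AT91_XDMAC_DT_PERID(11))>;
 *	dma-names = "tx";
 */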
0558 static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
0559 struct of_dma *of_dma)
0560 {
0561 struct at_xdmac *atxdmac = of_dma->of_dma_data;
0562 struct at_xdmac_chan *atchan;
0563 struct dma_chan *chan;
0564 struct device *dev = atxdmac->dma.dev;
0565
0566 if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
0568 return NULL;
0569 }
0570
0571 chan = dma_get_any_slave_channel(&atxdmac->dma);
0572 if (!chan) {
0573 dev_err(dev, "can't get a dma channel\n");
0574 return NULL;
0575 }
0576
0577 atchan = to_at_xdmac_chan(chan);
0578 atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
0579 atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
0580 atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
0581 dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
0582 atchan->memif, atchan->perif, atchan->perid);
0583
0584 return chan;
0585 }
0586
0587 static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
0588 enum dma_transfer_direction direction)
0589 {
0590 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
0591 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
0592 int csize, dwidth;
0593
0594 if (direction == DMA_DEV_TO_MEM) {
0595 atchan->cfg =
0596 AT91_XDMAC_DT_PERID(atchan->perid)
0597 | AT_XDMAC_CC_DAM_INCREMENTED_AM
0598 | AT_XDMAC_CC_SAM_FIXED_AM
0599 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
0600 | AT_XDMAC_CC_DSYNC_PER2MEM
0601 | AT_XDMAC_CC_MBSIZE_SIXTEEN
0602 | AT_XDMAC_CC_TYPE_PER_TRAN;
0603 if (atxdmac->layout->sdif)
0604 atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
0605 AT_XDMAC_CC_SIF(atchan->perif);
0606
0607 csize = ffs(atchan->sconfig.src_maxburst) - 1;
0608 if (csize < 0) {
0609 dev_err(chan2dev(chan), "invalid src maxburst value\n");
0610 return -EINVAL;
0611 }
0612 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
0613 dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
0614 if (dwidth < 0) {
0615 dev_err(chan2dev(chan), "invalid src addr width value\n");
0616 return -EINVAL;
0617 }
0618 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
0619 } else if (direction == DMA_MEM_TO_DEV) {
0620 atchan->cfg =
0621 AT91_XDMAC_DT_PERID(atchan->perid)
0622 | AT_XDMAC_CC_DAM_FIXED_AM
0623 | AT_XDMAC_CC_SAM_INCREMENTED_AM
0624 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
0625 | AT_XDMAC_CC_DSYNC_MEM2PER
0626 | AT_XDMAC_CC_MBSIZE_SIXTEEN
0627 | AT_XDMAC_CC_TYPE_PER_TRAN;
0628 if (atxdmac->layout->sdif)
0629 atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
0630 AT_XDMAC_CC_SIF(atchan->memif);
0631
0632 csize = ffs(atchan->sconfig.dst_maxburst) - 1;
0633 if (csize < 0) {
			dev_err(chan2dev(chan), "invalid dst maxburst value\n");
0635 return -EINVAL;
0636 }
0637 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
0638 dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
0639 if (dwidth < 0) {
0640 dev_err(chan2dev(chan), "invalid dst addr width value\n");
0641 return -EINVAL;
0642 }
0643 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
0644 }
0645
0646 dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
0647
0648 return 0;
0649 }
0650
0651
0652
0653
0654
0655
0656 static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
0657 {
0658 if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
0659 || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
0660 return -EINVAL;
0661
0662 if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
0663 || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
0664 return -EINVAL;
0665
0666 return 0;
0667 }
0668
0669 static int at_xdmac_set_slave_config(struct dma_chan *chan,
0670 struct dma_slave_config *sconfig)
0671 {
0672 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
0673
0674 if (at_xdmac_check_slave_config(sconfig)) {
0675 dev_err(chan2dev(chan), "invalid slave configuration\n");
0676 return -EINVAL;
0677 }
0678
0679 memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
0680
0681 return 0;
0682 }
0683
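/*
 * Slave transfers rely on the client having called dmaengine_slave_config()
 * first.  A minimal sketch for a device-to-memory channel (the peripheral
 * FIFO address and burst values are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= periph_rx_fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */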
0684 static struct dma_async_tx_descriptor *
0685 at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
0686 unsigned int sg_len, enum dma_transfer_direction direction,
0687 unsigned long flags, void *context)
0688 {
0689 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
0690 struct at_xdmac_desc *first = NULL, *prev = NULL;
0691 struct scatterlist *sg;
0692 int i;
0693 unsigned int xfer_size = 0;
0694 unsigned long irqflags;
0695 struct dma_async_tx_descriptor *ret = NULL;
0696
0697 if (!sgl)
0698 return NULL;
0699
0700 if (!is_slave_direction(direction)) {
0701 dev_err(chan2dev(chan), "invalid DMA direction\n");
0702 return NULL;
0703 }
0704
0705 dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
0706 __func__, sg_len,
0707 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
0708 flags);
0709
0710
0711 spin_lock_irqsave(&atchan->lock, irqflags);
0712
0713 if (at_xdmac_compute_chan_conf(chan, direction))
0714 goto spin_unlock;
0715
0716
0717 for_each_sg(sgl, sg, sg_len, i) {
0718 struct at_xdmac_desc *desc = NULL;
0719 u32 len, mem, dwidth, fixed_dwidth;
0720
0721 len = sg_dma_len(sg);
0722 mem = sg_dma_address(sg);
0723 if (unlikely(!len)) {
0724 dev_err(chan2dev(chan), "sg data length is zero\n");
0725 goto spin_unlock;
0726 }
0727 dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
0728 __func__, i, len, mem);
0729
0730 desc = at_xdmac_get_desc(atchan);
0731 if (!desc) {
0732 dev_err(chan2dev(chan), "can't get descriptor\n");
0733 if (first)
0734 list_splice_tail_init(&first->descs_list,
0735 &atchan->free_descs_list);
0736 goto spin_unlock;
0737 }
0738
0739
0740 if (direction == DMA_DEV_TO_MEM) {
0741 desc->lld.mbr_sa = atchan->sconfig.src_addr;
0742 desc->lld.mbr_da = mem;
0743 } else {
0744 desc->lld.mbr_sa = mem;
0745 desc->lld.mbr_da = atchan->sconfig.dst_addr;
0746 }
0747 dwidth = at_xdmac_get_dwidth(atchan->cfg);
0748 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
0749 ? dwidth
0750 : AT_XDMAC_CC_DWIDTH_BYTE;
0751 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
0752 | AT_XDMAC_MBR_UBC_NDEN
0753 | AT_XDMAC_MBR_UBC_NSEN
0754 | (len >> fixed_dwidth);
0755 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
0756 AT_XDMAC_CC_DWIDTH(fixed_dwidth);
0757 dev_dbg(chan2dev(chan),
0758 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
0759 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
0760
0761
0762 if (prev)
0763 at_xdmac_queue_desc(chan, prev, desc);
0764
0765 prev = desc;
0766 if (!first)
0767 first = desc;
0768
0769 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
0770 __func__, desc, first);
0771 list_add_tail(&desc->desc_node, &first->descs_list);
0772 xfer_size += len;
0773 }
0774
0775
0776 first->tx_dma_desc.flags = flags;
0777 first->xfer_size = xfer_size;
0778 first->direction = direction;
0779 ret = &first->tx_dma_desc;
0780
0781 spin_unlock:
0782 spin_unlock_irqrestore(&atchan->lock, irqflags);
0783 return ret;
0784 }
0785
0786 static struct dma_async_tx_descriptor *
0787 at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
0788 size_t buf_len, size_t period_len,
0789 enum dma_transfer_direction direction,
0790 unsigned long flags)
0791 {
0792 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
0793 struct at_xdmac_desc *first = NULL, *prev = NULL;
0794 unsigned int periods = buf_len / period_len;
0795 int i;
0796 unsigned long irqflags;
0797
0798 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
0799 __func__, &buf_addr, buf_len, period_len,
0800 direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
0801
0802 if (!is_slave_direction(direction)) {
0803 dev_err(chan2dev(chan), "invalid DMA direction\n");
0804 return NULL;
0805 }
0806
0807 if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
0808 dev_err(chan2dev(chan), "channel currently used\n");
0809 return NULL;
0810 }
0811
0812 if (at_xdmac_compute_chan_conf(chan, direction))
0813 return NULL;
0814
0815 for (i = 0; i < periods; i++) {
0816 struct at_xdmac_desc *desc = NULL;
0817
0818 spin_lock_irqsave(&atchan->lock, irqflags);
0819 desc = at_xdmac_get_desc(atchan);
0820 if (!desc) {
0821 dev_err(chan2dev(chan), "can't get descriptor\n");
0822 if (first)
0823 list_splice_tail_init(&first->descs_list,
0824 &atchan->free_descs_list);
0825 spin_unlock_irqrestore(&atchan->lock, irqflags);
0826 return NULL;
0827 }
0828 spin_unlock_irqrestore(&atchan->lock, irqflags);
0829 dev_dbg(chan2dev(chan),
0830 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
0831 __func__, desc, &desc->tx_dma_desc.phys);
0832
0833 if (direction == DMA_DEV_TO_MEM) {
0834 desc->lld.mbr_sa = atchan->sconfig.src_addr;
0835 desc->lld.mbr_da = buf_addr + i * period_len;
0836 } else {
0837 desc->lld.mbr_sa = buf_addr + i * period_len;
0838 desc->lld.mbr_da = atchan->sconfig.dst_addr;
0839 }
0840 desc->lld.mbr_cfg = atchan->cfg;
0841 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
0842 | AT_XDMAC_MBR_UBC_NDEN
0843 | AT_XDMAC_MBR_UBC_NSEN
0844 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
0845
0846 dev_dbg(chan2dev(chan),
0847 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
0848 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
0849
0850
0851 if (prev)
0852 at_xdmac_queue_desc(chan, prev, desc);
0853
0854 prev = desc;
0855 if (!first)
0856 first = desc;
0857
0858 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
0859 __func__, desc, first);
0860 list_add_tail(&desc->desc_node, &first->descs_list);
0861 }
0862
0863 at_xdmac_queue_desc(chan, prev, first);
0864 first->tx_dma_desc.flags = flags;
0865 first->xfer_size = buf_len;
0866 first->direction = direction;
0867
0868 return &first->tx_dma_desc;
0869 }
0870
0871 static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
0872 {
0873 u32 width;
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883 if (!(addr & 7)) {
0884 width = AT_XDMAC_CC_DWIDTH_DWORD;
0885 dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
0886 } else if (!(addr & 3)) {
0887 width = AT_XDMAC_CC_DWIDTH_WORD;
0888 dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
0889 } else if (!(addr & 1)) {
0890 width = AT_XDMAC_CC_DWIDTH_HALFWORD;
0891 dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
0892 } else {
0893 width = AT_XDMAC_CC_DWIDTH_BYTE;
0894 dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
0895 }
0896
0897 return width;
0898 }
0899
0900 static struct at_xdmac_desc *
0901 at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
0902 struct at_xdmac_chan *atchan,
0903 struct at_xdmac_desc *prev,
0904 dma_addr_t src, dma_addr_t dst,
0905 struct dma_interleaved_template *xt,
0906 struct data_chunk *chunk)
0907 {
0908 struct at_xdmac_desc *desc;
0909 u32 dwidth;
0910 unsigned long flags;
0911 size_t ublen;
0912
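	/*
	 * Memory-to-memory channel configuration used for interleaved
	 * transfers: the peripheral ID field is set to its "unused" value
	 * (0x7f) and the memory burst is sixteen data.  The addressing modes
	 * are filled in below from the template, and the data width is chosen
	 * per chunk from the address/size alignment.
	 */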
0928 u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
0929 | AT_XDMAC_CC_MBSIZE_SIXTEEN
0930 | AT_XDMAC_CC_TYPE_MEM_TRAN;
0931
0932 dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
0933 if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
0934 dev_dbg(chan2dev(chan),
0935 "%s: chunk too big (%zu, max size %lu)...\n",
0936 __func__, chunk->size,
0937 AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
0938 return NULL;
0939 }
0940
0941 if (prev)
0942 dev_dbg(chan2dev(chan),
0943 "Adding items at the end of desc 0x%p\n", prev);
0944
0945 if (xt->src_inc) {
0946 if (xt->src_sgl)
0947 chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
0948 else
0949 chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
0950 }
0951
0952 if (xt->dst_inc) {
0953 if (xt->dst_sgl)
0954 chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
0955 else
0956 chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
0957 }
0958
0959 spin_lock_irqsave(&atchan->lock, flags);
0960 desc = at_xdmac_get_desc(atchan);
0961 spin_unlock_irqrestore(&atchan->lock, flags);
0962 if (!desc) {
0963 dev_err(chan2dev(chan), "can't get descriptor\n");
0964 return NULL;
0965 }
0966
0967 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
0968
0969 ublen = chunk->size >> dwidth;
0970
0971 desc->lld.mbr_sa = src;
0972 desc->lld.mbr_da = dst;
0973 desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
0974 desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
0975
0976 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
0977 | AT_XDMAC_MBR_UBC_NDEN
0978 | AT_XDMAC_MBR_UBC_NSEN
0979 | ublen;
0980 desc->lld.mbr_cfg = chan_cc;
0981
0982 dev_dbg(chan2dev(chan),
0983 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
0984 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
0985 desc->lld.mbr_ubc, desc->lld.mbr_cfg);
0986
0987
0988 if (prev)
0989 at_xdmac_queue_desc(chan, prev, desc);
0990
0991 return desc;
0992 }
0993
0994 static struct dma_async_tx_descriptor *
0995 at_xdmac_prep_interleaved(struct dma_chan *chan,
0996 struct dma_interleaved_template *xt,
0997 unsigned long flags)
0998 {
0999 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1000 struct at_xdmac_desc *prev = NULL, *first = NULL;
1001 dma_addr_t dst_addr, src_addr;
1002 size_t src_skip = 0, dst_skip = 0, len = 0;
1003 struct data_chunk *chunk;
1004 int i;
1005
1006 if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
1007 return NULL;
1008
1009
1010
1011
1012
1013 if ((xt->numf > 1) && (xt->frame_size > 1))
1014 return NULL;
1015
1016 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
1017 __func__, &xt->src_start, &xt->dst_start, xt->numf,
1018 xt->frame_size, flags);
1019
1020 src_addr = xt->src_start;
1021 dst_addr = xt->dst_start;
1022
1023 if (xt->numf > 1) {
		first = at_xdmac_interleaved_queue_desc(chan, atchan,
							NULL,
							src_addr, dst_addr,
							xt, xt->sgl);
		if (!first)
			return NULL;

		/* The block is repeated mbr_bc + 1 times, so program numf - 1. */
1030 for (i = 0; i < xt->numf - 1; i++)
1031 at_xdmac_increment_block_count(chan, first);
1032
1033 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1034 __func__, first, first);
1035 list_add_tail(&first->desc_node, &first->descs_list);
1036 } else {
1037 for (i = 0; i < xt->frame_size; i++) {
1038 size_t src_icg = 0, dst_icg = 0;
1039 struct at_xdmac_desc *desc;
1040
1041 chunk = xt->sgl + i;
1042
1043 dst_icg = dmaengine_get_dst_icg(xt, chunk);
1044 src_icg = dmaengine_get_src_icg(xt, chunk);
1045
1046 src_skip = chunk->size + src_icg;
1047 dst_skip = chunk->size + dst_icg;
1048
1049 dev_dbg(chan2dev(chan),
1050 "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
1051 __func__, chunk->size, src_icg, dst_icg);
1052
1053 desc = at_xdmac_interleaved_queue_desc(chan, atchan,
1054 prev,
1055 src_addr, dst_addr,
1056 xt, chunk);
			if (!desc) {
				if (first)
					list_splice_tail_init(&first->descs_list,
							      &atchan->free_descs_list);
				return NULL;
			}
1062
1063 if (!first)
1064 first = desc;
1065
1066 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1067 __func__, desc, first);
1068 list_add_tail(&desc->desc_node, &first->descs_list);
1069
1070 if (xt->src_sgl)
1071 src_addr += src_skip;
1072
1073 if (xt->dst_sgl)
1074 dst_addr += dst_skip;
1075
1076 len += chunk->size;
1077 prev = desc;
1078 }
1079 }
1080
1081 first->tx_dma_desc.cookie = -EBUSY;
1082 first->tx_dma_desc.flags = flags;
1083 first->xfer_size = len;
1084
1085 return &first->tx_dma_desc;
1086 }
1087
1088 static struct dma_async_tx_descriptor *
1089 at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1090 size_t len, unsigned long flags)
1091 {
1092 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1093 struct at_xdmac_desc *first = NULL, *prev = NULL;
1094 size_t remaining_size = len, xfer_size = 0, ublen;
1095 dma_addr_t src_addr = src, dst_addr = dest;
1096 u32 dwidth;
1097
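	/*
	 * Memory-to-memory channel configuration used for memcpy: unused
	 * peripheral ID (0x7f), incremented source and destination
	 * addressing, sixteen-data memory bursts.  The data width is
	 * recomputed for every microblock from the current addresses and
	 * remaining size.
	 */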
1112 u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1113 | AT_XDMAC_CC_DAM_INCREMENTED_AM
1114 | AT_XDMAC_CC_SAM_INCREMENTED_AM
1115 | AT_XDMAC_CC_MBSIZE_SIXTEEN
1116 | AT_XDMAC_CC_TYPE_MEM_TRAN;
1117 unsigned long irqflags;
1118
1119 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
1120 __func__, &src, &dest, len, flags);
1121
1122 if (unlikely(!len))
1123 return NULL;
1124
1125 dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
1126
1127
1128 while (remaining_size) {
1129 struct at_xdmac_desc *desc = NULL;
1130
1131 dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
1132
1133 spin_lock_irqsave(&atchan->lock, irqflags);
1134 desc = at_xdmac_get_desc(atchan);
1135 spin_unlock_irqrestore(&atchan->lock, irqflags);
1136 if (!desc) {
1137 dev_err(chan2dev(chan), "can't get descriptor\n");
1138 if (first)
1139 list_splice_tail_init(&first->descs_list,
1140 &atchan->free_descs_list);
1141 return NULL;
1142 }
1143
1144
1145 src_addr += xfer_size;
1146 dst_addr += xfer_size;
1147
1148 if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
1149 xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
1150 else
1151 xfer_size = remaining_size;
1152
1153 dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
1154
1155
1156 dwidth = at_xdmac_align_width(chan,
1157 src_addr | dst_addr | xfer_size);
1158 chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
1159 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1160
1161 ublen = xfer_size >> dwidth;
1162 remaining_size -= xfer_size;
1163
1164 desc->lld.mbr_sa = src_addr;
1165 desc->lld.mbr_da = dst_addr;
1166 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
1167 | AT_XDMAC_MBR_UBC_NDEN
1168 | AT_XDMAC_MBR_UBC_NSEN
1169 | ublen;
1170 desc->lld.mbr_cfg = chan_cc;
1171
1172 dev_dbg(chan2dev(chan),
1173 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1174 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1175
1176
1177 if (prev)
1178 at_xdmac_queue_desc(chan, prev, desc);
1179
1180 prev = desc;
1181 if (!first)
1182 first = desc;
1183
1184 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1185 __func__, desc, first);
1186 list_add_tail(&desc->desc_node, &first->descs_list);
1187 }
1188
1189 first->tx_dma_desc.flags = flags;
1190 first->xfer_size = len;
1191
1192 return &first->tx_dma_desc;
1193 }
1194
1195 static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1196 struct at_xdmac_chan *atchan,
1197 dma_addr_t dst_addr,
1198 size_t len,
1199 int value)
1200 {
1201 struct at_xdmac_desc *desc;
1202 unsigned long flags;
1203 size_t ublen;
1204 u32 dwidth;
1205 char pattern;
1206
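	/*
	 * Memset uses the controller's hardware memset mode: the destination
	 * uses the microblock-stride addressing mode so the scatter-gather
	 * variant can reuse this helper, the peripheral ID field is unused
	 * (0x7f) and the fill pattern is taken from mbr_ds.
	 */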
1222 u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1223 | AT_XDMAC_CC_DAM_UBS_AM
1224 | AT_XDMAC_CC_SAM_INCREMENTED_AM
1225 | AT_XDMAC_CC_MBSIZE_SIXTEEN
1226 | AT_XDMAC_CC_MEMSET_HW_MODE
1227 | AT_XDMAC_CC_TYPE_MEM_TRAN;
1228
1229 dwidth = at_xdmac_align_width(chan, dst_addr);
1230
1231 if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1232 dev_err(chan2dev(chan),
1233 "%s: Transfer too large, aborting...\n",
1234 __func__);
1235 return NULL;
1236 }
1237
1238 spin_lock_irqsave(&atchan->lock, flags);
1239 desc = at_xdmac_get_desc(atchan);
1240 spin_unlock_irqrestore(&atchan->lock, flags);
1241 if (!desc) {
1242 dev_err(chan2dev(chan), "can't get descriptor\n");
1243 return NULL;
1244 }
1245
1246 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1247
1248
1249 pattern = (char)value;
1250
1251 ublen = len >> dwidth;
1252
1253 desc->lld.mbr_da = dst_addr;
1254 desc->lld.mbr_ds = (pattern << 24) |
1255 (pattern << 16) |
1256 (pattern << 8) |
1257 pattern;
1258 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1259 | AT_XDMAC_MBR_UBC_NDEN
1260 | AT_XDMAC_MBR_UBC_NSEN
1261 | ublen;
1262 desc->lld.mbr_cfg = chan_cc;
1263
1264 dev_dbg(chan2dev(chan),
1265 "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1266 __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
1267 desc->lld.mbr_cfg);
1268
1269 return desc;
1270 }
1271
1272 static struct dma_async_tx_descriptor *
1273 at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1274 size_t len, unsigned long flags)
1275 {
1276 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1277 struct at_xdmac_desc *desc;
1278
1279 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
1280 __func__, &dest, len, value, flags);
1281
1282 if (unlikely(!len))
1283 return NULL;
1284
	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
	if (!desc)
		return NULL;

	list_add_tail(&desc->desc_node, &desc->descs_list);
1287
1288 desc->tx_dma_desc.cookie = -EBUSY;
1289 desc->tx_dma_desc.flags = flags;
1290 desc->xfer_size = len;
1291
1292 return &desc->tx_dma_desc;
1293 }
1294
1295 static struct dma_async_tx_descriptor *
1296 at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1297 unsigned int sg_len, int value,
1298 unsigned long flags)
1299 {
1300 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1301 struct at_xdmac_desc *desc, *pdesc = NULL,
1302 *ppdesc = NULL, *first = NULL;
1303 struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
1304 size_t stride = 0, pstride = 0, len = 0;
1305 int i;
1306
1307 if (!sgl)
1308 return NULL;
1309
1310 dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
1311 __func__, sg_len, value, flags);
1312
1313
1314 for_each_sg(sgl, sg, sg_len, i) {
1315 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1316 __func__, &sg_dma_address(sg), sg_dma_len(sg),
1317 value, flags);
1318 desc = at_xdmac_memset_create_desc(chan, atchan,
1319 sg_dma_address(sg),
1320 sg_dma_len(sg),
1321 value);
		if (!desc) {
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			return NULL;
		}
1325
1326 if (!first)
1327 first = desc;
1328
1329
1330 pstride = stride;
1331 if (psg)
1332 stride = sg_dma_address(sg) -
1333 (sg_dma_address(psg) + sg_dma_len(psg));
1334
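		/*
		 * The scatterlist only gives us the address and length of
		 * each element, not the stride the hardware memset mode
		 * needs, so the stride is derived from consecutive elements.
		 * When the current stride matches the previous one and the
		 * two previous elements have the same length, the previous
		 * descriptor can be folded into the one before it by bumping
		 * its block count instead of chaining a new descriptor.
		 */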
1354 if (ppdesc && pdesc) {
1355 if ((stride == pstride) &&
1356 (sg_dma_len(ppsg) == sg_dma_len(psg))) {
1357 dev_dbg(chan2dev(chan),
1358 "%s: desc 0x%p can be merged with desc 0x%p\n",
1359 __func__, pdesc, ppdesc);
1360
1361
1362
1363
1364
1365 at_xdmac_increment_block_count(chan, ppdesc);
1366 ppdesc->lld.mbr_dus = stride;
1367
1368
1369
1370
1371
1372 list_add_tail(&pdesc->desc_node,
1373 &atchan->free_descs_list);
1374
1375
1376
1377
1378
1379
1380 pdesc = ppdesc;
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390 } else if (pstride ||
1391 sg_dma_address(sg) < sg_dma_address(psg)) {
1392
1393
1394
1395
1396 at_xdmac_queue_desc(chan, ppdesc, pdesc);
1397
1398
1399
1400
1401
1402
1403 list_add_tail(&desc->desc_node,
1404 &first->descs_list);
1405 dev_dbg(chan2dev(chan),
1406 "%s: add desc 0x%p to descs_list 0x%p\n",
1407 __func__, desc, first);
1408 }
1409 }
1410
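		/*
		 * The look-behind merge above never handles the last element,
		 * so check it separately: if it has the same length as the
		 * one before it, fold it into the previous descriptor too.
		 */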
1418 if ((i == (sg_len - 1)) &&
1419 sg_dma_len(psg) == sg_dma_len(sg)) {
1420 dev_dbg(chan2dev(chan),
1421 "%s: desc 0x%p can be merged with desc 0x%p\n",
1422 __func__, desc, pdesc);
1423
1424
1425
1426
1427
1428 at_xdmac_increment_block_count(chan, pdesc);
1429 pdesc->lld.mbr_dus = stride;
1430
1431
1432
1433
1434
1435 list_add_tail(&desc->desc_node,
1436 &atchan->free_descs_list);
1437 }
1438
1439
1440 ppdesc = pdesc;
1441 pdesc = desc;
1442
1443
1444 ppsg = psg;
1445 psg = sg;
1446
1447 len += sg_dma_len(sg);
1448 }
1449
1450 first->tx_dma_desc.cookie = -EBUSY;
1451 first->tx_dma_desc.flags = flags;
1452 first->xfer_size = len;
1453
1454 return &first->tx_dma_desc;
1455 }
1456
1457 static enum dma_status
1458 at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1459 struct dma_tx_state *txstate)
1460 {
1461 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1462 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1463 struct at_xdmac_desc *desc, *_desc, *iter;
1464 struct list_head *descs_list;
1465 enum dma_status ret;
1466 int residue, retry;
1467 u32 cur_nda, check_nda, cur_ubc, mask, value;
1468 u8 dwidth = 0;
1469 unsigned long flags;
1470 bool initd;
1471
1472 ret = dma_cookie_status(chan, cookie, txstate);
1473 if (ret == DMA_COMPLETE)
1474 return ret;
1475
1476 if (!txstate)
1477 return ret;
1478
1479 spin_lock_irqsave(&atchan->lock, flags);
1480
1481 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1482
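	/*
	 * If the transfer has not been started yet, the residue is simply the
	 * full transfer size.
	 */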
1487 if (!desc->active_xfer) {
1488 dma_set_residue(txstate, desc->xfer_size);
1489 goto spin_unlock;
1490 }
1491
1492 residue = desc->xfer_size;
1493
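	/*
	 * For peripheral-to-memory transfers, flush the channel FIFO first:
	 * data sitting in the FIFO has already been counted by the controller
	 * but not yet written to memory, so flushing keeps the reported
	 * residue consistent with what is actually in memory.
	 */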
1506 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1507 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
1508 if ((desc->lld.mbr_cfg & mask) == value) {
1509 at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1510 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1511 cpu_relax();
1512 }
1513
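	/*
	 * The residue is derived from CNDA (which descriptor is current) and
	 * CUBC (how much of its microblock is left), but the controller can
	 * move to the next descriptor between the two reads.  Read CNDA, then
	 * CUBC, then CNDA again and require that both CNDA values match and
	 * that the descriptor fetch has completed (INITD set) before trusting
	 * the pair; retry a bounded number of times otherwise.
	 */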
1540 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1541 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1542 rmb();
1543 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1544 rmb();
1545 initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1546 rmb();
1547 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1548 rmb();
1549
1550 if ((check_nda == cur_nda) && initd)
1551 break;
1552 }
1553
1554 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1555 ret = DMA_ERROR;
1556 goto spin_unlock;
1557 }
1558
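	/*
	 * Flush the FIFO a second time for peripheral-to-memory transfers:
	 * CUBC is updated when the controller issues the write command, so
	 * without a flush the snapshot could count data not yet in memory.
	 */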
1566 if ((desc->lld.mbr_cfg & mask) == value) {
1567 at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1568 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1569 cpu_relax();
1570 }
1571
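	/*
	 * Walk the descriptor chain: subtract each completed microblock from
	 * the total, stop at the descriptor the controller is currently
	 * processing, then add back what is still pending in that descriptor.
	 */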
1577 descs_list = &desc->descs_list;
1578 list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
1579 dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
1580 residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
1581 if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
1582 desc = iter;
1583 break;
1584 }
1585 }
1586 residue += cur_ubc << dwidth;
1587
1588 dma_set_residue(txstate, residue);
1589
1590 dev_dbg(chan2dev(chan),
1591 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1592 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
1593
1594 spin_unlock:
1595 spin_unlock_irqrestore(&atchan->lock, flags);
1596 return ret;
1597 }
1598
1599 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1600 {
1601 struct at_xdmac_desc *desc;
1602
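	/*
	 * If the channel is still enabled there is nothing to do: the
	 * completion path will call back in here once the current transfer
	 * finishes.
	 */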
1607 if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
1608 return;
1609
1610 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1611 xfer_node);
1612 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1613 if (!desc->active_xfer)
1614 at_xdmac_start_xfer(atchan, desc);
1615 }
1616
1617 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1618 {
1619 struct at_xdmac_desc *desc;
1620 struct dma_async_tx_descriptor *txd;
1621
1622 spin_lock_irq(&atchan->lock);
1623 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1624 __func__, atchan->irq_status);
1625 if (list_empty(&atchan->xfers_list)) {
1626 spin_unlock_irq(&atchan->lock);
1627 return;
1628 }
1629 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1630 xfer_node);
1631 spin_unlock_irq(&atchan->lock);
1632 txd = &desc->tx_dma_desc;
1633 if (txd->flags & DMA_PREP_INTERRUPT)
1634 dmaengine_desc_get_callback_invoke(txd, NULL);
1635 }
1636
1637
1638 static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
1639 {
1640 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1641 struct at_xdmac_desc *bad_desc;
1642
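	/*
	 * Report which error condition fired, disable the channel and dump
	 * the descriptor that was in flight; the tasklet then resumes normal
	 * descriptor handling.
	 */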
1649 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1650 dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1651 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1652 dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1653 if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1654 dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1655
1656
1657 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1658 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1659 cpu_relax();
1660
1661 bad_desc = list_first_entry(&atchan->xfers_list,
1662 struct at_xdmac_desc,
1663 xfer_node);
1664
1665
1666 dev_dbg(chan2dev(&atchan->chan),
1667 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
1668 __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
1669 bad_desc->lld.mbr_ubc);
1670
1671
1672 }
1673
1674 static void at_xdmac_tasklet(struct tasklet_struct *t)
1675 {
1676 struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
1677 struct at_xdmac_desc *desc;
1678 struct dma_async_tx_descriptor *txd;
1679 u32 error_mask;
1680
1681 if (at_xdmac_chan_is_cyclic(atchan))
1682 return at_xdmac_handle_cyclic(atchan);
1683
1684 error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
1685 AT_XDMAC_CIS_ROIS;
1686
1687 spin_lock_irq(&atchan->lock);
1688
1689 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1690 __func__, atchan->irq_status);
1691
1692 if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
1693 !(atchan->irq_status & error_mask)) {
1694 spin_unlock_irq(&atchan->lock);
1695 return;
1696 }
1697
1698 if (atchan->irq_status & error_mask)
1699 at_xdmac_handle_error(atchan);
1700
1701 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1702 xfer_node);
1703 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1704 if (!desc->active_xfer) {
1705 dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
1706 spin_unlock_irq(&atchan->lock);
1707 return;
1708 }
1709
1710 txd = &desc->tx_dma_desc;
1711 dma_cookie_complete(txd);
1712
1713 list_del(&desc->xfer_node);
1714 spin_unlock_irq(&atchan->lock);
1715
1716 if (txd->flags & DMA_PREP_INTERRUPT)
1717 dmaengine_desc_get_callback_invoke(txd, NULL);
1718
1719 dma_run_dependencies(txd);
1720
1721 spin_lock_irq(&atchan->lock);
1722
1723 list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
1724 at_xdmac_advance_work(atchan);
1725 spin_unlock_irq(&atchan->lock);
1726 }
1727
1728 static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1729 {
1730 struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
1731 struct at_xdmac_chan *atchan;
1732 u32 imr, status, pending;
1733 u32 chan_imr, chan_status;
1734 int i, ret = IRQ_NONE;
1735
1736 do {
1737 imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1738 status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1739 pending = status & imr;
1740
1741 dev_vdbg(atxdmac->dma.dev,
1742 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1743 __func__, status, imr, pending);
1744
1745 if (!pending)
1746 break;
1747
1748
1749 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1750 if (!((1 << i) & pending))
1751 continue;
1752
1753 atchan = &atxdmac->chan[i];
1754 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1755 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1756 atchan->irq_status = chan_status & chan_imr;
1757 dev_vdbg(atxdmac->dma.dev,
1758 "%s: chan%d: imr=0x%x, status=0x%x\n",
1759 __func__, i, chan_imr, chan_status);
1760 dev_vdbg(chan2dev(&atchan->chan),
1761 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1762 __func__,
1763 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1764 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1765 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1766 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1767 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1768 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1769
1770 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1771 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1772
1773 tasklet_schedule(&atchan->tasklet);
1774 ret = IRQ_HANDLED;
1775 }
1776
1777 } while (pending);
1778
1779 return ret;
1780 }
1781
1782 static void at_xdmac_issue_pending(struct dma_chan *chan)
1783 {
1784 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1785 unsigned long flags;
1786
1787 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1788
1789 spin_lock_irqsave(&atchan->lock, flags);
1790 at_xdmac_advance_work(atchan);
1791 spin_unlock_irqrestore(&atchan->lock, flags);
1792
1793 return;
1794 }
1795
1796 static int at_xdmac_device_config(struct dma_chan *chan,
1797 struct dma_slave_config *config)
1798 {
1799 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1800 int ret;
1801 unsigned long flags;
1802
1803 dev_dbg(chan2dev(chan), "%s\n", __func__);
1804
1805 spin_lock_irqsave(&atchan->lock, flags);
1806 ret = at_xdmac_set_slave_config(chan, config);
1807 spin_unlock_irqrestore(&atchan->lock, flags);
1808
1809 return ret;
1810 }
1811
1812 static int at_xdmac_device_pause(struct dma_chan *chan)
1813 {
1814 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1815 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1816 unsigned long flags;
1817
1818 dev_dbg(chan2dev(chan), "%s\n", __func__);
1819
1820 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1821 return 0;
1822
1823 spin_lock_irqsave(&atchan->lock, flags);
1824 at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
1825 while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
1826 & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1827 cpu_relax();
1828 spin_unlock_irqrestore(&atchan->lock, flags);
1829
1830 return 0;
1831 }
1832
1833 static int at_xdmac_device_resume(struct dma_chan *chan)
1834 {
1835 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1836 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1837 unsigned long flags;
1838
1839 dev_dbg(chan2dev(chan), "%s\n", __func__);
1840
1841 spin_lock_irqsave(&atchan->lock, flags);
1842 if (!at_xdmac_chan_is_paused(atchan)) {
1843 spin_unlock_irqrestore(&atchan->lock, flags);
1844 return 0;
1845 }
1846
1847 at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1848 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1849 spin_unlock_irqrestore(&atchan->lock, flags);
1850
1851 return 0;
1852 }
1853
1854 static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1855 {
1856 struct at_xdmac_desc *desc, *_desc;
1857 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1858 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1859 unsigned long flags;
1860
1861 dev_dbg(chan2dev(chan), "%s\n", __func__);
1862
1863 spin_lock_irqsave(&atchan->lock, flags);
1864 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1865 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1866 cpu_relax();
1867
1868
1869 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
1870 list_del(&desc->xfer_node);
1871 list_splice_tail_init(&desc->descs_list,
1872 &atchan->free_descs_list);
1873 }
1874
1875 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1876 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1877 spin_unlock_irqrestore(&atchan->lock, flags);
1878
1879 return 0;
1880 }
1881
1882 static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1883 {
1884 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1885 struct at_xdmac_desc *desc;
1886 int i;
1887
1888 if (at_xdmac_chan_is_enabled(atchan)) {
1889 dev_err(chan2dev(chan),
1890 "can't allocate channel resources (channel enabled)\n");
1891 return -EIO;
1892 }
1893
1894 if (!list_empty(&atchan->free_descs_list)) {
1895 dev_err(chan2dev(chan),
1896 "can't allocate channel resources (channel not free from a previous use)\n");
1897 return -EIO;
1898 }
1899
1900 for (i = 0; i < init_nr_desc_per_channel; i++) {
1901 desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
1902 if (!desc) {
1903 if (i == 0) {
1904 dev_warn(chan2dev(chan),
1905 "can't allocate any descriptors\n");
1906 return -EIO;
1907 }
1908 dev_warn(chan2dev(chan),
1909 "only %d descriptors have been allocated\n", i);
1910 break;
1911 }
1912 list_add_tail(&desc->desc_node, &atchan->free_descs_list);
1913 }
1914
1915 dma_cookie_init(chan);
1916
1917 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1918
1919 return i;
1920 }
1921
1922 static void at_xdmac_free_chan_resources(struct dma_chan *chan)
1923 {
1924 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1925 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
1926 struct at_xdmac_desc *desc, *_desc;
1927
1928 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
1929 dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
1930 list_del(&desc->desc_node);
1931 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
1932 }
1933
1934 return;
1935 }
1936
1937 static void at_xdmac_axi_config(struct platform_device *pdev)
1938 {
1939 struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
1940 bool dev_m2m = false;
1941 u32 dma_requests;
1942
1943 if (!atxdmac->layout->axi_config)
1944 return;
1945
1946 if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
1947 &dma_requests)) {
1948 dev_info(&pdev->dev, "controller in mem2mem mode.\n");
1949 dev_m2m = true;
1950 }
1951
1952 if (dev_m2m) {
1953 at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
1954 at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
1955 } else {
1956 at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
1957 at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
1958 }
1959 }
1960
1961 static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
1962 {
1963 struct at_xdmac *atxdmac = dev_get_drvdata(dev);
1964 struct dma_chan *chan, *_chan;
1965
1966 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1967 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1968
1969
1970 if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
1971 return -EAGAIN;
1972 }
1973 return 0;
1974 }
1975
1976 static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
1977 {
1978 struct at_xdmac *atxdmac = dev_get_drvdata(dev);
1979 struct dma_chan *chan, *_chan;
1980
1981 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1982 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1983
1984 atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
1985 if (at_xdmac_chan_is_cyclic(atchan)) {
1986 if (!at_xdmac_chan_is_paused(atchan))
1987 at_xdmac_device_pause(chan);
1988 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1989 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
1990 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
1991 }
1992 }
1993 atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1994
1995 at_xdmac_off(atxdmac);
1996 clk_disable_unprepare(atxdmac->clk);
1997 return 0;
1998 }
1999
2000 static int __maybe_unused atmel_xdmac_resume(struct device *dev)
2001 {
2002 struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2003 struct at_xdmac_chan *atchan;
2004 struct dma_chan *chan, *_chan;
2005 struct platform_device *pdev = container_of(dev, struct platform_device, dev);
2006 int i;
2007 int ret;
2008
2009 ret = clk_prepare_enable(atxdmac->clk);
2010 if (ret)
2011 return ret;
2012
2013 at_xdmac_axi_config(pdev);
2014
2015
2016 for (i = 0; i < atxdmac->dma.chancnt; i++) {
2017 atchan = &atxdmac->chan[i];
2018 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2019 cpu_relax();
2020 }
2021
2022 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
2023 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2024 atchan = to_at_xdmac_chan(chan);
2025 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
2026 if (at_xdmac_chan_is_cyclic(atchan)) {
2027 if (at_xdmac_chan_is_paused(atchan))
2028 at_xdmac_device_resume(chan);
2029 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
2030 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
2031 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
2032 wmb();
2033 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
2034 }
2035 }
2036 return 0;
2037 }
2038
2039 static int at_xdmac_probe(struct platform_device *pdev)
2040 {
2041 struct at_xdmac *atxdmac;
2042 int irq, nr_channels, i, ret;
2043 void __iomem *base;
2044 u32 reg;
2045
2046 irq = platform_get_irq(pdev, 0);
2047 if (irq < 0)
2048 return irq;
2049
2050 base = devm_platform_ioremap_resource(pdev, 0);
2051 if (IS_ERR(base))
2052 return PTR_ERR(base);
2053
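	/*
	 * Read the number of channels directly from GTYPE: the at_xdmac
	 * structure is sized from the channel count, so it is not allocated
	 * yet and the register helpers cannot be used here.
	 */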
2059 reg = readl_relaxed(base + AT_XDMAC_GTYPE);
2060 nr_channels = AT_XDMAC_NB_CH(reg);
2061 if (nr_channels > AT_XDMAC_MAX_CHAN) {
2062 dev_err(&pdev->dev, "invalid number of channels (%u)\n",
2063 nr_channels);
2064 return -EINVAL;
2065 }
2066
2067 atxdmac = devm_kzalloc(&pdev->dev,
2068 struct_size(atxdmac, chan, nr_channels),
2069 GFP_KERNEL);
2070 if (!atxdmac) {
2071 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
2072 return -ENOMEM;
2073 }
2074
2075 atxdmac->regs = base;
2076 atxdmac->irq = irq;
2077
2078 atxdmac->layout = of_device_get_match_data(&pdev->dev);
2079 if (!atxdmac->layout)
2080 return -ENODEV;
2081
2082 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
2083 if (IS_ERR(atxdmac->clk)) {
2084 dev_err(&pdev->dev, "can't get dma_clk\n");
2085 return PTR_ERR(atxdmac->clk);
2086 }
2087
2088
2089 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
2090 if (ret) {
2091 dev_err(&pdev->dev, "can't request irq\n");
2092 return ret;
2093 }
2094
2095 ret = clk_prepare_enable(atxdmac->clk);
2096 if (ret) {
2097 dev_err(&pdev->dev, "can't prepare or enable clock\n");
2098 goto err_free_irq;
2099 }
2100
2101 atxdmac->at_xdmac_desc_pool =
2102 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
2103 sizeof(struct at_xdmac_desc), 4, 0);
2104 if (!atxdmac->at_xdmac_desc_pool) {
2105 dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
2106 ret = -ENOMEM;
2107 goto err_clk_disable;
2108 }
2109
2110 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
2111 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
2112 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
2113 dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
2114 dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
2115 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
2116
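	/*
	 * Mark the channels as private: without DMA_PRIVATE a second slave
	 * channel allocation would fail in the dmaengine core.
	 */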
2120 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
2121 atxdmac->dma.dev = &pdev->dev;
2122 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
2123 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
2124 atxdmac->dma.device_tx_status = at_xdmac_tx_status;
2125 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
2126 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
2127 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
2128 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
2129 atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
2130 atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
2131 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
2132 atxdmac->dma.device_config = at_xdmac_device_config;
2133 atxdmac->dma.device_pause = at_xdmac_device_pause;
2134 atxdmac->dma.device_resume = at_xdmac_device_resume;
2135 atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
2136 atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2137 atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2138 atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2139 atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2140
2141
2142 at_xdmac_off(atxdmac);
2143
2144
2145 INIT_LIST_HEAD(&atxdmac->dma.channels);
2146 for (i = 0; i < nr_channels; i++) {
2147 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2148
2149 atchan->chan.device = &atxdmac->dma;
2150 list_add_tail(&atchan->chan.device_node,
2151 &atxdmac->dma.channels);
2152
2153 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
2154 atchan->mask = 1 << i;
2155
2156 spin_lock_init(&atchan->lock);
2157 INIT_LIST_HEAD(&atchan->xfers_list);
2158 INIT_LIST_HEAD(&atchan->free_descs_list);
2159 tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
2160
2161
2162 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2163 cpu_relax();
2164 }
2165 platform_set_drvdata(pdev, atxdmac);
2166
2167 ret = dma_async_device_register(&atxdmac->dma);
2168 if (ret) {
2169 dev_err(&pdev->dev, "fail to register DMA engine device\n");
2170 goto err_clk_disable;
2171 }
2172
2173 ret = of_dma_controller_register(pdev->dev.of_node,
2174 at_xdmac_xlate, atxdmac);
2175 if (ret) {
2176 dev_err(&pdev->dev, "could not register of dma controller\n");
2177 goto err_dma_unregister;
2178 }
2179
2180 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
2181 nr_channels, atxdmac->regs);
2182
2183 at_xdmac_axi_config(pdev);
2184
2185 return 0;
2186
2187 err_dma_unregister:
2188 dma_async_device_unregister(&atxdmac->dma);
2189 err_clk_disable:
2190 clk_disable_unprepare(atxdmac->clk);
2191 err_free_irq:
2192 free_irq(atxdmac->irq, atxdmac);
2193 return ret;
2194 }
2195
2196 static int at_xdmac_remove(struct platform_device *pdev)
2197 {
2198 struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2199 int i;
2200
2201 at_xdmac_off(atxdmac);
2202 of_dma_controller_free(pdev->dev.of_node);
2203 dma_async_device_unregister(&atxdmac->dma);
2204 clk_disable_unprepare(atxdmac->clk);
2205
2206 free_irq(atxdmac->irq, atxdmac);
2207
2208 for (i = 0; i < atxdmac->dma.chancnt; i++) {
2209 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2210
2211 tasklet_kill(&atchan->tasklet);
2212 at_xdmac_free_chan_resources(&atchan->chan);
2213 }
2214
2215 return 0;
2216 }
2217
2218 static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
2219 .prepare = atmel_xdmac_prepare,
2220 SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
2221 };
2222
2223 static const struct of_device_id atmel_xdmac_dt_ids[] = {
2224 {
2225 .compatible = "atmel,sama5d4-dma",
2226 .data = &at_xdmac_sama5d4_layout,
2227 }, {
2228 .compatible = "microchip,sama7g5-dma",
2229 .data = &at_xdmac_sama7g5_layout,
2230 }, {
2231
2232 }
2233 };
2234 MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
2235
2236 static struct platform_driver at_xdmac_driver = {
2237 .probe = at_xdmac_probe,
2238 .remove = at_xdmac_remove,
2239 .driver = {
2240 .name = "at_xdmac",
2241 .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
2242 .pm = pm_ptr(&atmel_xdmac_dev_pm_ops),
2243 }
2244 };
2245
2246 static int __init at_xdmac_init(void)
2247 {
2248 return platform_driver_register(&at_xdmac_driver);
2249 }
2250 subsys_initcall(at_xdmac_init);
2251
2252 static void __exit at_xdmac_exit(void)
2253 {
2254 platform_driver_unregister(&at_xdmac_driver);
2255 }
2256 module_exit(at_xdmac_exit);
2257
2258 MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
2259 MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
2260 MODULE_LICENSE("GPL");