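/*
 * dmaengine (virt-dma based) interface for the AMD PTDMA passthrough engine.
 */
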
#include "ptdma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}

static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct pt_dma_desc, vd);
}

static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_free_chan_resources(&chan->vc);
}

static void pt_synchronize(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_synchronize(&chan->vc);
}

static void pt_do_cleanup(struct virt_dma_desc *vd)
{
	struct pt_dma_desc *desc = to_pt_desc(vd);
	struct pt_device *pt = desc->pt;

	kmem_cache_free(pt->dma_desc_cache, desc);
}

static int pt_dma_start_desc(struct pt_dma_desc *desc)
{
	struct pt_passthru_engine *pt_engine;
	struct pt_device *pt;
	struct pt_cmd *pt_cmd;
	struct pt_cmd_queue *cmd_q;

	desc->issued_to_hw = 1;

	pt_cmd = &desc->pt_cmd;
	pt = pt_cmd->pt;
	cmd_q = &pt->cmd_q;
	pt_engine = &pt_cmd->passthru;

	pt->tdata.cmd = pt_cmd;

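	/*
	 * Submit the command to the hardware queue; completion is reported
	 * back through pt_cmd_callback().
	 */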
	pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

	return 0;
}

static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
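	/* Get the next DMA descriptor on the issued list, if any */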
	struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);

	return vd ? to_pt_desc(vd) : NULL;
}

static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
						 struct pt_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

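	/*
	 * Complete finished descriptors and return the next one that still
	 * needs to be issued to the hardware.
	 */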
	do {
		if (desc) {
			if (!desc->issued_to_hw) {
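				/* No error and not yet issued: run this descriptor next */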
				if (desc->status != DMA_ERROR)
					return desc;
			}

			tx_desc = &desc->vd.tx;
			vd = &desc->vd;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->vc.lock, flags);

		if (desc) {
			if (desc->status != DMA_COMPLETE) {
				if (desc->status != DMA_ERROR)
					desc->status = DMA_COMPLETE;

				dma_cookie_complete(tx_desc);
				dma_descriptor_unmap(tx_desc);
				list_del(&desc->vd.node);
			} else {
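				/* Already completed; don't handle it twice */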
				tx_desc = NULL;
			}
		}

		desc = pt_next_dma_desc(chan);

		spin_unlock_irqrestore(&chan->vc.lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
			dma_run_dependencies(tx_desc);
			vchan_vdesc_fini(vd);
		}
	} while (desc);

	return NULL;
}

static void pt_cmd_callback(void *data, int err)
{
	struct pt_dma_desc *desc = data;
	struct dma_chan *dma_chan;
	struct pt_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	dma_chan = desc->vd.tx.chan;
	chan = to_pt_chan(dma_chan);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
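		/* Check for DMA descriptor completion */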
		desc = pt_handle_active_desc(chan, desc);

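		/* Nothing left to issue */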
		if (!desc)
			break;

		ret = pt_dma_start_desc(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}
}

static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
					     unsigned long flags)
{
	struct pt_dma_desc *desc;

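	/* GFP_NOWAIT: descriptor preparation may run in atomic context */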
	desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vc, &desc->vd, flags);

	desc->pt = chan->pt;
	desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
	desc->issued_to_hw = 0;
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
					  dma_addr_t dst,
					  dma_addr_t src,
					  unsigned int len,
					  unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_passthru_engine *pt_engine;
	struct pt_dma_desc *desc;
	struct pt_cmd *pt_cmd;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

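	/* Describe the passthrough (memcpy) operation for the engine */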
	pt_cmd = &desc->pt_cmd;
	pt_cmd->pt = chan->pt;
	pt_engine = &pt_cmd->passthru;
	pt_cmd->engine = PT_ENGINE_PASSTHRU;
	pt_engine->src_dma = src;
	pt_engine->dst_dma = dst;
	pt_engine->src_len = len;
	pt_cmd->pt_cmd_callback = pt_cmd_callback;
	pt_cmd->data = desc;

	desc->len = len;

	return desc;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
		   dma_addr_t src, size_t len, unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = pt_create_desc(dma_chan, dst, src, len, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}

static void pt_issue_pending(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;
	unsigned long flags;
	bool engine_is_idle = true;

	spin_lock_irqsave(&chan->vc.lock, flags);

	desc = pt_next_dma_desc(chan);
	if (desc)
		engine_is_idle = false;

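	/* Move any submitted descriptors onto the issued list */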
	vchan_issue_pending(&chan->vc);

	desc = pt_next_dma_desc(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

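	/* If the engine was idle, kick off processing of the next descriptor */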
	if (engine_is_idle && desc)
		pt_cmd_callback(desc, 0);
}

static enum dma_status
pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	     struct dma_tx_state *txstate)
{
	struct pt_device *pt = to_pt_chan(c)->pt;
	struct pt_cmd_queue *cmd_q = &pt->cmd_q;

	pt_check_status_trans(pt, cmd_q);
	return dma_cookie_status(c, cookie, txstate);
}

static int pt_pause(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_stop_queue(&chan->pt->cmd_q);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int pt_resume(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_start_queue(&chan->pt->cmd_q);
	desc = pt_next_dma_desc(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

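	/* If there is something on the issued list, restart processing */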
	if (desc)
		pt_cmd_callback(desc, 0);

	return 0;
}

static int pt_terminate_all(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;
	struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
	LIST_HEAD(head);

	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	vchan_free_chan_resources(&chan->vc);

	return 0;
}

int pt_dmaengine_register(struct pt_device *pt)
{
	struct pt_dma_chan *chan;
	struct dma_device *dma_dev = &pt->dma_dev;
	char *cmd_cache_name;
	char *desc_cache_name;
	int ret;

	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
				       GFP_KERNEL);
	if (!pt->pt_dma_chan)
		return -ENOMEM;

	cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					"%s-dmaengine-cmd-cache",
					dev_name(pt->dev));
	if (!cmd_cache_name)
		return -ENOMEM;

	desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					 "%s-dmaengine-desc-cache",
					 dev_name(pt->dev));
	if (!desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
					       sizeof(struct pt_dma_desc), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = pt->dev;
	dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

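	/* The channel is only available for private (exclusive) allocation */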
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);

	chan = pt->pt_dma_chan;
	chan->pt = pt;

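	/* Set the dmaengine callbacks */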
	dma_dev->device_free_chan_resources = pt_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
	dma_dev->device_issue_pending = pt_issue_pending;
	dma_dev->device_tx_status = pt_tx_status;
	dma_dev->device_pause = pt_pause;
	dma_dev->device_resume = pt_resume;
	dma_dev->device_terminate_all = pt_terminate_all;
	dma_dev->device_synchronize = pt_synchronize;

	chan->vc.desc_free = pt_do_cleanup;
	vchan_init(&chan->vc, dma_dev);

	dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(pt->dma_desc_cache);

err_cache:
	kmem_cache_destroy(pt->dma_cmd_cache);

	return ret;
}

void pt_dmaengine_unregister(struct pt_device *pt)
{
	struct dma_device *dma_dev = &pt->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(pt->dma_desc_cache);
	kmem_cache_destroy(pt->dma_cmd_cache);
}