/* Qualcomm Technologies HIDMA DMA engine low level code */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE			16	/* each EVRE is 16 bytes */

#define HIDMA_TRCA_CTRLSTS_REG		0x000
#define HIDMA_TRCA_RING_LOW_REG		0x008
#define HIDMA_TRCA_RING_HIGH_REG	0x00C
#define HIDMA_TRCA_RING_LEN_REG		0x010
#define HIDMA_TRCA_DOORBELL_REG		0x400

#define HIDMA_EVCA_CTRLSTS_REG		0x000
#define HIDMA_EVCA_INTCTRL_REG		0x004
#define HIDMA_EVCA_RING_LOW_REG		0x008
#define HIDMA_EVCA_RING_HIGH_REG	0x00C
#define HIDMA_EVCA_RING_LEN_REG		0x010
#define HIDMA_EVCA_WRITE_PTR_REG	0x020
#define HIDMA_EVCA_DOORBELL_REG		0x400

#define HIDMA_EVCA_IRQ_STAT_REG		0x100
#define HIDMA_EVCA_IRQ_CLR_REG		0x108
#define HIDMA_EVCA_IRQ_EN_REG		0x110

#define HIDMA_EVRE_CFG_IDX		0

#define HIDMA_EVRE_ERRINFO_BIT_POS	24
#define HIDMA_EVRE_CODE_BIT_POS		28

#define HIDMA_EVRE_ERRINFO_MASK		GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK		GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK		GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK		GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS		0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS		0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS		1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS	9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS	10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS	11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS	14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \
		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

/* advance a ring offset by one entry, wrapping at the end of the ring */
#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {								\
	iter += size;						\
	if (iter >= ring_size)					\
		iter -= ring_size;				\
} while (0)
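
/*
 * Illustrative worked example (not part of the driver): with a 64-byte
 * ring of 16-byte entries, an iterator at offset 48 advances past the
 * end and wraps back to the start:
 *
 *	u32 iter = 48;
 *
 *	HIDMA_INCREMENT_ITERATOR(iter, 16, 64);	// iter is now 0
 *
 * The macro assumes iter < ring_size and size <= ring_size, so a single
 * subtraction is enough to wrap; it is not a general modulo.
 */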

#define HIDMA_CH_STATE(val)	\
	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK				  \
	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)   |	  \
	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) |	  \
	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	     |	  \
	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)    |	  \
	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

enum ch_command {
	HIDMA_CH_DISABLE = 0,
	HIDMA_CH_ENABLE = 1,
	HIDMA_CH_SUSPEND = 2,
	HIDMA_CH_RESET = 9,
};

enum ch_state {
	HIDMA_CH_DISABLED = 0,
	HIDMA_CH_ENABLED = 1,
	HIDMA_CH_RUNNING = 2,
	HIDMA_CH_SUSPENDED = 3,
	HIDMA_CH_STOPPED = 4,
};

enum err_code {
	HIDMA_EVRE_STATUS_COMPLETE = 1,
	HIDMA_EVRE_STATUS_ERROR = 4,
};
static bool hidma_is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}

void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (!atomic_read(&tre->allocated)) {
		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
		return;
	}

	atomic_set(&tre->allocated, 0);
}
0133
0134 int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
0135 void (*callback)(void *data), void *data, u32 *tre_ch)
0136 {
0137 unsigned int i;
0138 struct hidma_tre *tre;
0139 u32 *tre_local;
0140
0141 if (!tre_ch || !lldev)
0142 return -EINVAL;
0143
0144
0145 for (i = 0; i < lldev->nr_tres - 1; i++) {
0146 if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
0147 break;
0148 }
0149
0150 if (i == (lldev->nr_tres - 1))
0151 return -ENOMEM;
0152
0153 tre = &lldev->trepool[i];
0154 tre->dma_sig = sig;
0155 tre->dev_name = dev_name;
0156 tre->callback = callback;
0157 tre->data = data;
0158 tre->idx = i;
0159 tre->status = 0;
0160 tre->queued = 0;
0161 tre->err_code = 0;
0162 tre->err_info = 0;
0163 tre->lldev = lldev;
0164 tre_local = &tre->tre_local[0];
0165 tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
0166 tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);
0167 *tre_ch = i;
0168 if (callback)
0169 callback(data);
0170 return 0;
0171 }
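
/*
 * Sketch of the expected call sequence (hypothetical client code, shown
 * for illustration; "lldev", "src", "dest" and "len" are assumed to come
 * from the upper layer, and HIDMA_TRE_MEMCPY is the transfer type
 * defined in hidma.h):
 *
 *	u32 tre_ch;
 *
 *	if (!hidma_ll_request(lldev, 0, "client", my_callback, my_data,
 *			      &tre_ch)) {
 *		hidma_ll_set_transfer_params(lldev, tre_ch, src, dest,
 *					     len, 0, HIDMA_TRE_MEMCPY);
 *		hidma_ll_queue_request(lldev, tre_ch);
 *		hidma_ll_start(lldev);
 *	}
 *
 * The TRE is returned to the pool with hidma_ll_free() after the
 * transfer completes.
 */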

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(struct tasklet_struct *t)
{
	struct hidma_lldev *lldev = from_tasklet(lldev, t, task);
	struct hidma_tre *tre;

	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* call the user if it has been read by the hardware */
		if (tre->callback)
			tre->callback(tre->data);
	}
}

static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
				u8 err_code)
{
	struct hidma_tre *tre;
	unsigned long flags;
	u32 tre_iterator;

	spin_lock_irqsave(&lldev->lock, flags);

	tre_iterator = lldev->tre_processed_off;
	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
	if (!tre) {
		spin_unlock_irqrestore(&lldev->lock, flags);
		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
			 tre_iterator / HIDMA_TRE_SIZE);
		return -EINVAL;
	}
	lldev->pending_tre_list[tre->tre_index] = NULL;

	/*
	 * Keep track of pending TREs that SW is expecting to receive
	 * from HW. We got one now. Decrement our counter.
	 */
	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
		dev_warn(lldev->dev, "tre count mismatch on completion");
		atomic_set(&lldev->pending_tre_count, 0);
	}

	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
				 lldev->tre_ring_size);
	lldev->tre_processed_off = tre_iterator;
	spin_unlock_irqrestore(&lldev->lock, flags);

	tre->err_info = err_info;
	tre->err_code = err_code;
	tre->queued = 0;

	kfifo_put(&lldev->handoff_fifo, tre);
	tasklet_schedule(&lldev->task);

	return 0;
}
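
/*
 * Design note: hidma_post_completed() runs in interrupt context and holds
 * lldev->lock only while it detaches the TRE from the pending list. The
 * client callback is deliberately not invoked here; the TRE is handed off
 * through handoff_fifo and the callback runs later from the
 * hidma_ll_tre_complete() tasklet, so clients can take their own locks or
 * resubmit work without deadlocking against this driver.
 */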

/*
 * Walk the event ring from the last processed offset up to the write
 * pointer reported by the hardware and retire one pending TRE for each
 * completed EVRE. Returns the number of completions processed.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
	u32 evre_ring_size = lldev->evre_ring_size;
	u32 err_info, err_code, evre_write_off;
	u32 evre_iterator;
	u32 num_completed = 0;

	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	evre_iterator = lldev->evre_processed_off;

	if ((evre_write_off > evre_ring_size) ||
	    (evre_write_off % HIDMA_EVRE_SIZE)) {
		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
		return 0;
	}

	/*
	 * By the time control reaches here the number of EVREs and TREs
	 * may not match. Only consume the ones that the hardware told us
	 * about via the write pointer.
	 */
	while (evre_iterator != evre_write_off) {
		u32 *current_evre = lldev->evre_ring + evre_iterator;
		u32 cfg;

		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
		err_info &= HIDMA_EVRE_ERRINFO_MASK;
		err_code =
		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

		if (hidma_post_completed(lldev, err_info, err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
					 evre_ring_size);

		/*
		 * Re-read the write pointer: while we are processing the
		 * delivered events, the hardware may queue further events
		 * for the software to consume.
		 */
		evre_write_off =
		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
		num_completed++;

		/*
		 * An error interrupt between the last device pause and
		 * now will have flushed all pending transactions to the
		 * software for processing; stop walking the ring.
		 */
		if (!hidma_ll_isenabled(lldev))
			break;
	}

	if (num_completed) {
		u32 evre_read_off = (lldev->evre_processed_off +
				     HIDMA_EVRE_SIZE * num_completed);
		evre_read_off = evre_read_off % evre_ring_size;
		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

		/* record the last processed tre offset */
		lldev->evre_processed_off = evre_read_off;
	}

	return num_completed;
}
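
/*
 * Worked example of the doorbell arithmetic above (illustrative values):
 * with evre_ring_size = 160 (ten 16-byte EVREs), evre_processed_off = 144
 * and num_completed = 2:
 *
 *	evre_read_off = (144 + 16 * 2) % 160 = 16
 *
 * i.e. the read pointer wraps past the end of the ring before it is
 * written back to HIDMA_EVCA_DOORBELL_REG.
 */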

void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{
	while (atomic_read(&lldev->pending_tre_count)) {
		if (hidma_post_completed(lldev, err_info, err_code))
			break;
	}
}

static int hidma_ll_reset(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Wait for the channel to report the disabled state after reset.
	 * Do a polled read with a 1ms interval and a 10ms timeout.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not reset\n");
		return ret;
	}

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Wait for the channel to report the disabled state after reset.
	 * Do a polled read with a 1ms interval and a 10ms timeout.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_DISABLED;
	lldev->evch_state = HIDMA_CH_DISABLED;
	return 0;
}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVREs from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters; the EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs, so the last processed offset is used to figure out which
 * TRE is associated with which EVRE.
 *
 * An EVRE carries a copy of the error and status fields of its TRE in
 * the upper bits of its CFG word; these are extracted with the
 * HIDMA_EVRE_ERRINFO_* and HIDMA_EVRE_CODE_* definitions above.
 *
 * If an error interrupt is seen, the channel is shut down and every
 * pending TRE is completed back to the client with an error code.
 */
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
	unsigned long irqflags;

	if (cause & HIDMA_ERR_INT_MASK) {
		dev_err(lldev->dev, "error 0x%x, disabling...\n", cause);

		/* Clear out pending interrupts */
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/* No further submissions. */
		hidma_ll_disable(lldev);

		/* Driver completes the txn and intimates the client. */
		hidma_cleanup_pending_tre(lldev, 0xFF,
					  HIDMA_EVRE_STATUS_ERROR);
		return;
	}

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);

	/*
	 * Consume as many completions as the hardware has posted. New
	 * EVREs may arrive while this batch is being processed; the
	 * top-level handler re-reads the cause register and calls back
	 * in until no enabled interrupt source remains asserted.
	 */
	hidma_handle_tre_completion(lldev);
}

irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;
	u32 status;
	u32 enable;
	u32 cause;

	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	cause = status & enable;

	while (cause) {
		hidma_ll_int_handler_internal(lldev, cause);

		/*
		 * Another interrupt might have arrived while we are
		 * processing this one. Read the new cause.
		 */
		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
		cause = status & enable;
	}

	return IRQ_HANDLED;
}

irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{
	struct hidma_lldev *lldev = arg;

	hidma_ll_int_handler_internal(lldev, cause);
	return IRQ_HANDLED;
}

int hidma_ll_enable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not get enabled\n");
		return ret;
	}

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not get enabled\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_ENABLED;
	lldev->evch_state = HIDMA_CH_ENABLED;

	/* enable irqs */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return 0;
}

void hidma_ll_start(struct hidma_lldev *lldev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);
}

bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
	u32 val;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);

	/* both channels have to be enabled before we consider the device up */
	if (hidma_is_chan_enabled(lldev->trch_state) &&
	    hidma_is_chan_enabled(lldev->evch_state))
		return true;

	return false;
}

void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;
	unsigned long flags;

	tre = &lldev->trepool[tre_ch];

	/* copy the TRE into its location in the TRE ring */
	spin_lock_irqsave(&lldev->lock, flags);
	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
	lldev->pending_tre_list[tre->tre_index] = tre;
	memcpy(lldev->tre_ring + lldev->tre_write_offset,
	       &tre->tre_local[0], HIDMA_TRE_SIZE);
	tre->err_code = 0;
	tre->err_info = 0;
	tre->queued = 1;
	atomic_inc(&lldev->pending_tre_count);
	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
				   % lldev->tre_ring_size;
	spin_unlock_irqrestore(&lldev->lock, flags);
}
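
/*
 * Queueing and starting are deliberately separate steps: several TREs can
 * be copied into the ring back to back and then made visible to the
 * hardware with a single doorbell write, e.g. (hypothetical caller, with
 * ch0/ch1 returned by hidma_ll_request()):
 *
 *	hidma_ll_queue_request(lldev, ch0);
 *	hidma_ll_queue_request(lldev, ch1);
 *	hidma_ll_start(lldev);	// one doorbell covers both TREs
 */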

/*
 * Note that even though we stop this channel, if there is a pending
 * transaction in flight it will complete, as the hardware does not stop
 * mid-transaction.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	/* The channel needs to be in working state */
	if (!hidma_ll_isenabled(lldev))
		return 0;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Wait until the channel reports the suspended state.
	 * Do a polled read with a 1ms interval and a 10ms timeout.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Wait until the channel reports the suspended state.
	 * Do a polled read with a 1ms interval and a 10ms timeout.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	lldev->evch_state = HIDMA_CH_SUSPENDED;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return 0;
}

void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags, u32 txntype)
{
	struct hidma_tre *tre;
	u32 *tre_local;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
			tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (!atomic_read(&tre->allocated)) {
		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
			tre_ch);
		return;
	}

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
	tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
	tre_local[HIDMA_TRE_LEN_IDX] = len;
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
	tre->int_flags = flags;
}
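
/*
 * Example TRE programming (illustrative; dma_src and dma_dest are assumed
 * to be DMA addresses already mapped by the caller, and HIDMA_TRE_MEMCPY
 * is the transfer type defined in hidma.h):
 *
 *	hidma_ll_set_transfer_params(lldev, tre_ch, dma_src, dma_dest,
 *				     SZ_4K, 0, HIDMA_TRE_MEMCPY);
 *
 * Only the low byte of the CFG word is rewritten here, so the channel
 * index and the IEOB bit set up in hidma_ll_request() are preserved.
 */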

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
	int rc;
	u64 addr;
	u32 val;
	u32 nr_tres = lldev->nr_tres;

	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_processed_off = 0;
	lldev->evre_processed_off = 0;
	lldev->tre_write_offset = 0;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* clear all pending interrupts */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	addr = lldev->tre_dma;
	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	addr = lldev->evre_dma;
	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(HIDMA_EVRE_SIZE * nr_tres,
	       lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* configure interrupts */
	hidma_ll_setup_irq(lldev, lldev->msi_support);

	return hidma_ll_enable(lldev);
}

void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
{
	u32 val;

	lldev->msi_support = msi;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* support wired interrupts by default, MSI when requested */
	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
	val &= ~0xF;
	if (!lldev->msi_support)
		val = val | 0x1;
	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

	/* clear all pending interrupts and enable them */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}

struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{
	u32 required_bytes;
	struct hidma_lldev *lldev;
	int rc;
	size_t sz;

	if (!trca || !evca || !dev || !nr_tres)
		return NULL;

	/* need at least four TREs */
	if (nr_tres < 4)
		return NULL;

	/* need an extra space */
	nr_tres += 1;

	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
	if (!lldev)
		return NULL;

	lldev->evca = evca;
	lldev->trca = trca;
	lldev->dev = dev;
	sz = sizeof(struct hidma_tre);
	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
	if (!lldev->trepool)
		return NULL;

	required_bytes = sizeof(lldev->pending_tre_list[0]);
	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
					       GFP_KERNEL);
	if (!lldev->pending_tre_list)
		return NULL;

	/* overallocate so the ring base can be aligned below */
	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
					      GFP_KERNEL);
	if (!lldev->tre_ring)
		return NULL;

	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
	lldev->nr_tres = nr_tres;

	/* the TRE ring has to be TRE aligned */
	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
		u8 tre_ring_shift;

		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
		lldev->tre_dma += tre_ring_shift;
		lldev->tre_ring += tre_ring_shift;
	}

	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
					       GFP_KERNEL);
	if (!lldev->evre_ring)
		return NULL;

	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

	/* the EVRE ring has to be EVRE aligned */
	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
		u8 evre_ring_shift;

		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
		lldev->evre_dma += evre_ring_shift;
		lldev->evre_ring += evre_ring_shift;
	}
	lldev->nr_tres = nr_tres;
	lldev->chidx = chidx;

	sz = nr_tres * sizeof(struct hidma_tre *);
	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
	if (rc)
		return NULL;

	rc = hidma_ll_setup(lldev);
	if (rc)
		return NULL;

	spin_lock_init(&lldev->lock);
	tasklet_setup(&lldev->task, hidma_ll_tre_complete);
	lldev->initialized = 1;
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return lldev;
}
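
/*
 * Initialization sketch (hypothetical probe path; trca_base and evca_base
 * are assumed to be ioremapped channel register windows):
 *
 *	struct hidma_lldev *lldev;
 *
 *	lldev = hidma_ll_init(&pdev->dev, nr_tres, trca_base, evca_base, 0);
 *	if (!lldev)
 *		return -ENODEV;
 *
 * Note that hidma_ll_init() adds one extra TRE internally and requires at
 * least four TREs from the caller.
 */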

int hidma_ll_uninit(struct hidma_lldev *lldev)
{
	u32 required_bytes;
	int rc = 0;
	u32 val;

	if (!lldev)
		return -ENODEV;

	if (!lldev->initialized)
		return 0;

	lldev->initialized = 0;

	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
	tasklet_kill(&lldev->task);
	memset(lldev->trepool, 0, required_bytes);
	lldev->trepool = NULL;
	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_write_offset = 0;

	rc = hidma_ll_reset(lldev);

	/*
	 * Clear all pending interrupts since the device is shutting
	 * down anyway.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return rc;
}

enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
	enum dma_status ret = DMA_ERROR;
	struct hidma_tre *tre;
	unsigned long flags;
	u8 err_code;

	spin_lock_irqsave(&lldev->lock, flags);

	tre = &lldev->trepool[tre_ch];
	err_code = tre->err_code;

	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
		ret = DMA_COMPLETE;
	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
		ret = DMA_ERROR;
	else
		ret = DMA_IN_PROGRESS;
	spin_unlock_irqrestore(&lldev->lock, flags);

	return ret;
}