#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
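
/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * a related group of operations (data transfers or immediate commands) in a
 * single structure.  Each operation consumes one transfer ring element
 * (TRE).  TREs are reserved when a transaction is allocated and are filled
 * and handed to hardware when it is committed.
 *
 * A transaction moves through a sequence of per-channel lists that mirror
 * its lifecycle: allocated, committed, pending (handed to hardware),
 * complete, and polled.  Completed transactions are processed in NAPI
 * polling context.
 */

/* Hardware values defining the transfer element (TRE) type */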
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* transfer length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)
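
/* Initialize a pool of structures.  The pool provides at least @count
 * entries, over-allocated by @max_alloc - 1 entries so that a block of
 * up to @max_alloc entries never has to wrap past the end of the pool.
 */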
int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* By allocating a few extra entries in our pool (one fewer than
	 * the maximum number that will be requested in a single
	 * allocation), we can always satisfy requests without ever
	 * worrying about straddling the end of the pool array.  If
	 * there aren't enough entries starting at the free index, we
	 * just allocate free entries from the beginning of the pool.
	 */
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}
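
/* Free the memory held by a pool initialized by gsi_trans_pool_init() */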
void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}
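
/* Like gsi_trans_pool_init(), but the pool is backed by DMA-coherent
 * memory allocated with dma_alloc_coherent().  Entries are handed out
 * one at a time by gsi_trans_pool_alloc_dma().
 */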
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator gives us a power-of-2 number of pages, so round
	 * the total size up to that.  Any extra memory is then usable
	 * as pool entries, and the exit path can recompute the exact
	 * allocated size from count and size alone.
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}
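
/* Free the memory held by a pool initialized by gsi_trans_pool_init_dma() */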
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}
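
/* Return the byte offset of the next free entry in the pool */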
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	WARN_ON(!count);
	WARN_ON(count > pool->max_alloc);

	/* Allocate from the beginning if wrapping would otherwise occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}
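
/* Allocate a contiguous block of zeroed entries from a pool */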
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}
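
/* Allocate a single zeroed entry from a DMA pool, returning its DMA address */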
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}
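
/* Map a TRE ring entry index to the transaction it is associated with */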
static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];

	/* The completion event will indicate the last TRE used */
	index += trans->used_count - 1;

	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}
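
/* Return the transaction mapped to a given ring entry */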
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}
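
/* Return the oldest completed transaction for a channel (or null) */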
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	return list_first_entry_or_null(&channel->trans_info.complete,
					struct gsi_trans, links);
}
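
/* Move a transaction from the allocated list to the committed list */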
static void gsi_trans_move_committed(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->committed);

	spin_unlock_bh(&trans_info->spinlock);
}
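
/* Move transactions from the committed list to the pending list */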
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to the pending list */
	list_cut_position(&list, &trans_info->committed, &trans->links);
	list_splice_tail(&list, &trans_info->pending);

	spin_unlock_bh(&trans_info->spinlock);
}
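
/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */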
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to the complete list */
	list_cut_position(&list, &trans_info->pending, &trans->links);
	list_splice_tail(&list, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);
}
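
/* Move a transaction from the completed list to the polled list */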
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->polled);

	spin_unlock_bh(&trans_info->spinlock);
}
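
/* Reserve some number of TREs on a channel.  Returns true if successful */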
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}
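
/* Release previously-reserved TRE entries to a channel */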
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}
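
/* Return true if no transactions are allocated on a channel */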
bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
{
	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
	struct gsi_trans_info *trans_info;

	trans_info = &gsi->channel[channel_id].trans_info;

	return atomic_read(&trans_info->tre_avail) == tre_max;
}
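
/* Allocate a GSI transaction on a channel, reserving tre_count TREs */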
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	if (WARN_ON(tre_count > channel->trans_tre_max))
		return NULL;

	trans_info = &channel->trans_info;

	/* We reserve the TREs now, but consume them at commit time.
	 * If there aren't enough available, we're done.
	 */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	/* Allocate and initialize non-zero fields in the transaction */
	trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->rsvd_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;

	spin_lock_bh(&trans_info->spinlock);

	list_add_tail(&trans->links, &trans_info->alloc);

	spin_unlock_bh(&trans_info->spinlock);

	refcount_set(&trans->refcount, 1);

	return trans;
}
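
/* Free a previously-allocated transaction */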
void gsi_trans_free(struct gsi_trans *trans)
{
	refcount_t *refcount = &trans->refcount;
	struct gsi_trans_info *trans_info;
	bool last;

	/* We must hold the lock to release the last reference */
	if (refcount_dec_not_one(refcount))
		return;

	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

	spin_lock_bh(&trans_info->spinlock);

	/* A reference might have been added before we got the lock */
	last = refcount_dec_and_test(refcount);
	if (last)
		list_del(&trans->links);

	spin_unlock_bh(&trans_info->spinlock);

	if (!last)
		return;

	if (trans->used_count)
		ipa_gsi_trans_release(trans);

	/* Releasing the reserved TREs implicitly frees the sgl[]
	 * array, along with the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->rsvd_count);
}
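
/* Add an immediate command to a transaction */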
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
	u32 which = trans->used_count++;
	struct scatterlist *sg;

	WARN_ON(which >= trans->rsvd_count);

	/* Commands are quite different from data transfer requests.
	 * Their payloads come from a pool whose memory is allocated
	 * using dma_alloc_coherent().  We therefore do *not* map them
	 * for DMA (unlike what we do for pages and skbs).
	 *
	 * When a transaction completes, the SGL is normally unmapped.
	 * A command transaction has direction DMA_NONE, which tells
	 * gsi_trans_complete() to skip the unmapping step.
	 *
	 * The only things we use directly in a command scatter/gather
	 * entry are the DMA address and length.  We still need the SG
	 * table flags to be maintained though, so assign a NULL page
	 * pointer for that purpose.  This also ensures the sg_page()
	 * function for it won't trigger a bug.
	 */
	sg = &trans->sgl[which];
	sg_assign_page(sg, NULL);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = size;

	trans->cmd_opcode[which] = opcode;
}
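
/* Add a page transfer to a transaction.  It will fill the only TRE. */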
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	if (WARN_ON(trans->rsvd_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used_count))
		return -EINVAL;

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	/* Transaction now owns the (DMA mapped) page */
	trans->used_count++;

	return 0;
}
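
/* Add an SKB transfer to a transaction.  No other TREs will be used. */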
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used_count;
	int ret;

	if (WARN_ON(trans->rsvd_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used_count))
		return -EINVAL;

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used_count = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
	if (!ret)
		return -ENOMEM;

	/* Transaction now owns the (DMA mapped) skb */
	trans->used_count += used_count;

	return 0;
}
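
/* Compute the length/opcode value to use for a TRE */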
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}
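
/* Compute the flags value to use for a given TRE */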
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}
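
/* Fill a TRE in the channel ring from a local copy */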
static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* Fill the local copy first, then store it to the ring with a
	 * single structure assignment rather than writing the fields
	 * in the ring one at a time.
	 */
	*dest_tre = tre;
}
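
/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes.  Moves the transaction to the
 * committed list.  If the doorbell was requested (or the channel is out of
 * TREs), also moves it to the pending list and rings the channel doorbell.
 */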
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *tre_ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u8 *cmd_opcode;
	u32 avail;
	u32 i;

	WARN_ON(!trans->used_count);

	/* Consume the entries.  If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * For a command channel, each TRE carries an opcode from the
	 * transaction's cmd_opcode[] array; otherwise this is a simple
	 * data transfer request, whose opcode is IPA_CMD_NONE.
	 */
	cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
	avail = tre_ring->count - tre_ring->index % tre_ring->count;
	dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
	for_each_sg(trans->sgl, sg, trans->used_count, i) {
		bool last_tre = i == trans->used_count - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(tre_ring, 0);
		if (cmd_opcode)
			opcode = *cmd_opcode++;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}

	/* Associate the last TRE with the transaction */
	gsi_trans_map(trans, tre_ring->index);

	tre_ring->index += trans->used_count;

	trans->len = byte_count;
	if (channel->toward_ipa)
		gsi_trans_tx_committed(trans);

	gsi_trans_move_committed(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what is being queued to hardware on a TX channel */
		if (channel->toward_ipa)
			gsi_trans_tx_queued(trans);
		gsi_trans_move_pending(trans);
		gsi_channel_doorbell(channel);
	}
}
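
/* Commit a GSI transaction.  An unused transaction is simply freed. */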
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used_count)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}
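
/* Commit a GSI transaction and wait for it to complete */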
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used_count)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}
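
/* Process the completion of a transaction; called while polling */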
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}
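
/* Cancel a channel's pending transactions */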
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;
	bool cancelled;

	/* Mark all pending transactions cancelled, then move them to
	 * the complete list so they can be processed normally.
	 */
	spin_lock_bh(&trans_info->spinlock);

	cancelled = !list_empty(&trans_info->pending);
	list_for_each_entry(trans, &trans_info->pending, links)
		trans->cancelled = true;

	list_splice_tail_init(&trans_info->pending, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);

	/* Schedule NAPI polling to complete the cancelled transactions */
	if (cancelled)
		napi_schedule(&channel->napi);
}
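
/* Issue a command to read a single byte from a channel */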
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *tre_ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */
	dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	tre_ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}
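
/* Mark a gsi_trans_read_byte() request done */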
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}
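
/* Initialize a channel's GSI transaction info */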
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 tre_count = channel->tre_count;
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	trans_info = &channel->trans_info;

	/* The tre_avail field limits the number of outstanding
	 * transactions; a transaction is allocated only if enough
	 * TREs are available to cover its worst-case use.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
	atomic_set(&trans_info->tre_avail, tre_max);

	/* Worst case is one TRE per transaction, so the transaction
	 * pool needs at most tre_max entries, and only one transaction
	 * is allocated from it at a time.
	 */
	ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
				  tre_max, 1);
	if (ret)
		return -ENOMEM;

	/* A completion event contains a pointer to the TRE that caused
	 * the event (which will be the last one used by the transaction).
	 * Each entry in this map records the transaction associated
	 * with a corresponding completed TRE.
	 */
	trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map) {
		ret = -ENOMEM;
		goto err_trans_free;
	}

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction.  Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed.  So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->trans_tre_max);
	if (ret)
		goto err_map_free;

	spin_lock_init(&trans_info->spinlock);
	INIT_LIST_HEAD(&trans_info->alloc);
	INIT_LIST_HEAD(&trans_info->committed);
	INIT_LIST_HEAD(&trans_info->pending);
	INIT_LIST_HEAD(&trans_info->complete);
	INIT_LIST_HEAD(&trans_info->polled);

	return 0;

err_map_free:
	kfree(trans_info->map);
err_trans_free:
	gsi_trans_pool_exit(&trans_info->pool);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}
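
/* Inverse of gsi_channel_trans_init() */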
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	gsi_trans_pool_exit(&trans_info->pool);
	kfree(trans_info->map);
}