// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"

/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA commands in a single structure.
 * (A "command" in this sense is either a data transfer or an IPA immediate
 * command.)  Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe commands
 * to be performed.  When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform a command (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per command).  If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds.  This way
 * exhaustion of the available TREs in a channel ring is detected
 * as early as possible.  All resources required to complete a transaction
 * are allocated at transaction allocation time.
 *
 * Commands performed as part of a transaction are represented in an array
 * of Linux scatterlist structures.  This array is allocated with the
 * transaction, and its entries are initialized using standard scatterlist
 * functions (such as sg_set_buf() or skb_to_sgvec()).
 *
 * Once a transaction's scatterlist structures have been initialized, the
 * transaction is committed.  The caller is responsible for mapping buffers
 * for DMA if necessary, and this should be done *before* allocating
 * the transaction.  Between a successful allocation and commit of a
 * transaction no errors should occur.
 *
 * Committing transfers ownership of the entire transaction to the GSI
 * transaction core.  The GSI transaction code formats the content of
 * the scatterlist array into the channel ring buffer and informs the
 * hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it.  Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */
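
/* A minimal usage sketch of the lifecycle described above (editorial
 * illustration, not part of the driver): a caller performing a single
 * outbound page transfer might combine the functions in this file as
 * follows.  The gsi, channel_id, page, size, and offset values are
 * assumed to come from the caller's context.
 *
 *	struct gsi_trans *trans;
 *	int ret;
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
 *	if (!trans)
 *		return -EBUSY;		// Channel TREs exhausted
 *
 *	ret = gsi_trans_page_add(trans, page, size, offset);
 *	if (ret) {
 *		gsi_trans_free(trans);	// Releases the reserved TRE
 *		return ret;
 *	}
 *
 *	gsi_trans_commit(trans, true);	// Ownership passes to the GSI core
 */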

/* Hardware values representing a transfer element type */
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
	 * single allocation), we can always satisfy requests without
	 * ever worrying about straddling the end of the pool array.
	 * If there aren't enough entries starting at the free index,
	 * we just allocate free entries from the beginning of the pool.
	 */
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}
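
/* Worked example of the over-allocation above (illustrative numbers, not
 * from the driver): with count = 64 and max_alloc = 8, kcalloc() provides
 * at least 64 + 8 - 1 = 71 entries.  Whenever fewer than the requested
 * number of entries remain before the end of the array, allocation
 * restarts at offset 0; the up-to-7 unused tail entries are the price of
 * never splitting a single allocation across the end of the pool.
 */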

void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}

/* Home-grown DMA pool.  This way we can preallocate and use the tre_count
 * to guarantee allocations will succeed.  Even though we specify max_alloc
 * (and it can be more than one), we only allow allocation of a single
 * element from a DMA pool.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator will give us a power-of-2 number of pages
	 * sufficient to satisfy our request.  Round up our requested
	 * size to avoid any unused space in the allocation.  This way
	 * gsi_trans_pool_exit_dma() can assume the total allocated
	 * size is exactly (count * size).
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}
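
/* Worked sizing example for the DMA pool above (illustrative numbers, not
 * from the driver): a request for size = 24, count = 64, max_alloc = 1
 * first rounds size up to 32, giving total_size = 64 * 32 = 2048 bytes.
 * Assuming 4 KiB pages, get_order(2048) is 0, so PAGE_SIZE << 0 = 4096
 * bytes are allocated and pool->count becomes 4096 / 32 = 128 entries;
 * the page rounding is folded into the usable entry count, not wasted.
 */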

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}

/* Return the byte offset of the next free entry in the pool */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	WARN_ON(!count);
	WARN_ON(count > pool->max_alloc);

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}

/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}

/* Map a TRE ring entry index to the transaction it is associated with */
static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];

	/* The completion event will indicate the last TRE used */
	index += trans->used_count - 1;

	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}
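
/* Index arithmetic example for gsi_trans_map() (illustrative numbers): in
 * a ring with tre_ring.count = 256, a transaction whose first TRE is at
 * index 254 and which uses 3 TREs has its last TRE at 254 + 3 - 1 = 256,
 * which records the transaction at map[256 % 256] = map[0].  The modulo
 * keeps the lookup correct once the free-running index passes the end of
 * the ring.
 */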

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	return list_first_entry_or_null(&channel->trans_info.complete,
					struct gsi_trans, links);
}

/* Move a transaction from the allocated list to the committed list */
static void gsi_trans_move_committed(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->committed);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move transactions from the committed list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to the pending list */
	list_cut_position(&list, &trans_info->committed, &trans->links);
	list_splice_tail(&list, &trans_info->pending);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to completed list */
	list_cut_position(&list, &trans_info->pending, &trans->links);
	list_splice_tail(&list, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction from the completed list to the polled list */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->polled);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Reserve some number of TREs on a channel.  Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}

/* Return true if no transactions are allocated, false otherwise */
bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
{
	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
	struct gsi_trans_info *trans_info;

	trans_info = &gsi->channel[channel_id].trans_info;

	return atomic_read(&trans_info->tre_avail) == tre_max;
}

/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	if (WARN_ON(tre_count > channel->trans_tre_max))
		return NULL;

	trans_info = &channel->trans_info;

	/* We reserve the TREs now, but consume them at commit time.
	 * If there aren't enough available, we're done.
	 */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	/* Allocate and initialize non-zero fields in the transaction */
	trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->rsvd_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;

	spin_lock_bh(&trans_info->spinlock);

	list_add_tail(&trans->links, &trans_info->alloc);

	spin_unlock_bh(&trans_info->spinlock);

	refcount_set(&trans->refcount, 1);

	return trans;
}

/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
	refcount_t *refcount = &trans->refcount;
	struct gsi_trans_info *trans_info;
	bool last;

	/* We must hold the lock to release the last reference */
	if (refcount_dec_not_one(refcount))
		return;

	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

	spin_lock_bh(&trans_info->spinlock);

	/* Reference might have been added before we got the lock */
	last = refcount_dec_and_test(refcount);
	if (last)
		list_del(&trans->links);

	spin_unlock_bh(&trans_info->spinlock);

	if (!last)
		return;

	if (trans->used_count)
		ipa_gsi_trans_release(trans);

	/* Releasing the reserved TREs implicitly frees the sgl[] array,
	 * plus the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->rsvd_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
	u32 which = trans->used_count++;
	struct scatterlist *sg;

	WARN_ON(which >= trans->rsvd_count);

	/* Commands are quite different from data transfer requests.
	 * Their payloads come from a pool whose memory is allocated
	 * using dma_alloc_coherent().  We therefore do *not* map them
	 * for DMA (unlike what we do for pages and skbs).
	 *
	 * When a transaction completes, the SGL is normally unmapped.
	 * A command transaction has direction DMA_NONE, which tells
	 * gsi_trans_complete() to skip the unmapping step.
	 *
	 * The only things we use directly in a command scatter/gather
	 * entry are the DMA address and length.  We still need the SG
	 * table flags to be maintained though, so assign a NULL page
	 * pointer for that purpose.
	 */
	sg = &trans->sgl[which];
	sg_assign_page(sg, NULL);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = size;

	trans->cmd_opcode[which] = opcode;
}
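
/* Illustrative sketch (assumed caller context, not from this file): an
 * immediate command whose payload lives in a coherent DMA pool might be
 * queued and executed synchronously like this.  The pool, size, and
 * opcode values here are assumptions for the example only.
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_NONE);
 *	if (!trans)
 *		return -EBUSY;
 *	payload = gsi_trans_pool_alloc_dma(&pool, &payload_addr);
 *	// ... fill *payload with the command body ...
 *	gsi_trans_cmd_add(trans, payload, size, payload_addr, opcode);
 *	gsi_trans_commit_wait(trans);
 */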

/* Add a page transfer to a transaction.  It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	if (WARN_ON(trans->rsvd_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used_count))
		return -EINVAL;

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used_count++;	/* Transaction now owns the (DMA mapped) page */

	return 0;
}

/* Add an SKB transfer to a transaction.  No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used_count;
	int ret;

	if (WARN_ON(trans->rsvd_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used_count))
		return -EINVAL;

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used_count = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
	if (!ret)
		return -ENOMEM;

	/* Transaction now owns the (DMA mapped) skb */
	trans->used_count += used_count;

	return 0;
}

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}
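
/* Worked example for gsi_tre_flags() (values derived from the mask
 * definitions above): for the last TRE of a plain transfer on a TX
 * channel (last_tre = true, bei = true, opcode = IPA_CMD_NONE), the type
 * field holds GSI_RE_XFER (0x2 << 16 = 0x20000) and IEOT (bit 9) plus
 * BEI (bit 10) are set, so the CPU-order flags word is
 * 0x20000 | 0x200 | 0x400 = 0x20600.
 */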

static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* ARM64 can write 16 bytes as a unit with a single instruction.
	 * Doing the assignment this way is an attempt to make that happen.
	 */
	*dest_tre = tre;
}

/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes.  Moves the transaction to the
 * committed list.  Finally, updates the channel ring pointer and optionally
 * rings the doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *tre_ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u8 *cmd_opcode;
	u32 avail;
	u32 i;

	WARN_ON(!trans->used_count);

	/* Consume the entries.  If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If there is no command opcode array we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
	avail = tre_ring->count - tre_ring->index % tre_ring->count;
	dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
	for_each_sg(trans->sgl, sg, trans->used_count, i) {
		bool last_tre = i == trans->used_count - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(tre_ring, 0);
		if (cmd_opcode)
			opcode = *cmd_opcode++;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}
	/* Associate the TRE with the transaction */
	gsi_trans_map(trans, tre_ring->index);

	tre_ring->index += trans->used_count;

	trans->len = byte_count;
	if (channel->toward_ipa)
		gsi_trans_tx_committed(trans);

	gsi_trans_move_committed(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_trans_tx_queued(trans);
		gsi_trans_move_pending(trans);
		gsi_channel_doorbell(channel);
	}
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used_count)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used_count)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;
	bool cancelled;

	/* channel->gsi->mutex is held by caller */
	spin_lock_bh(&trans_info->spinlock);

	cancelled = !list_empty(&trans_info->pending);
	list_for_each_entry(trans, &trans_info->pending, links)
		trans->cancelled = true;

	list_splice_tail_init(&trans_info->pending, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);

	/* Schedule NAPI polling to complete the cancelled transactions */
	if (cancelled)
		napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *tre_ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */

	dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	tre_ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 tre_count = channel->tre_count;
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	trans_info = &channel->trans_info;

	/* The tre_avail field is what ultimately limits the number of
	 * outstanding transactions and their resources.  A transaction
	 * allocation succeeds only if the TREs available are sufficient
	 * for what the transaction might need.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
	atomic_set(&trans_info->tre_avail, tre_max);

	/* We can't use more TREs than the number available in the ring.
	 * This limits the number of transactions that can be outstanding.
	 * Worst case is one TRE per transaction (but we actually limit
	 * it to something a little less than that).  By allocating a
	 * power-of-two number of transactions we can use an index
	 * modulo that number to determine the next one that's free.
	 * Transactions are allocated one at a time.
	 */
	ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
				  tre_max, 1);
	if (ret)
		return -ENOMEM;

	/* A completion event contains a pointer to the TRE that caused
	 * the event (which will be the last one used by the transaction).
	 * Each entry in this map records the transaction associated
	 * with a corresponding completed TRE.
	 */
	trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map) {
		ret = -ENOMEM;
		goto err_trans_free;
	}

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction.  Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed.  So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->trans_tre_max);
	if (ret)
		goto err_map_free;

	spin_lock_init(&trans_info->spinlock);
	INIT_LIST_HEAD(&trans_info->alloc);
	INIT_LIST_HEAD(&trans_info->committed);
	INIT_LIST_HEAD(&trans_info->pending);
	INIT_LIST_HEAD(&trans_info->complete);
	INIT_LIST_HEAD(&trans_info->polled);

	return 0;

err_map_free:
	kfree(trans_info->map);
err_trans_free:
	gsi_trans_pool_exit(&trans_info->pool);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}

/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	gsi_trans_pool_exit(&trans_info->pool);
	kfree(trans_info->map);
}