// SPDX-License-Identifier: GPL-2.0-only
/*
 * fence-chain: chain fences together in a timeline
 *
 * Copyright (C) 2018 Advanced Micro Devices, Inc.
 * Authors:
 *  Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence-chain.h>

static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);

/**
 * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
 * @chain: chain node to get the previous node from
 *
 * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
 * chain node.
 */
static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
{
    struct dma_fence *prev;

    rcu_read_lock();
    prev = dma_fence_get_rcu_safe(&chain->prev);
    rcu_read_unlock();
    return prev;
}

/**
 * dma_fence_chain_walk - chain walking function
 * @fence: current chain node
 *
 * Walk the chain to the next node. Returns the next fence or NULL if we are at
 * the end of the chain. Garbage collects chain nodes which are already
 * signaled.
 */
struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
{
    struct dma_fence_chain *chain, *prev_chain;
    struct dma_fence *prev, *replacement, *tmp;

    chain = to_dma_fence_chain(fence);
    if (!chain) {
        dma_fence_put(fence);
        return NULL;
    }

    while ((prev = dma_fence_chain_get_prev(chain))) {

        prev_chain = to_dma_fence_chain(prev);
        if (prev_chain) {
            if (!dma_fence_is_signaled(prev_chain->fence))
                break;

            replacement = dma_fence_chain_get_prev(prev_chain);
        } else {
            if (!dma_fence_is_signaled(prev))
                break;

            replacement = NULL;
        }

        tmp = unrcu_pointer(cmpxchg(&chain->prev, RCU_INITIALIZER(prev),
                                    RCU_INITIALIZER(replacement)));
        if (tmp == prev)
            dma_fence_put(tmp);
        else
            dma_fence_put(replacement);
        dma_fence_put(prev);
    }

    dma_fence_put(fence);
    return prev;
}
EXPORT_SYMBOL(dma_fence_chain_walk);
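
/*
 * Illustrative usage sketch, not part of the original file: callers normally
 * walk a chain through the dma_fence_chain_for_each() helper from
 * <linux/dma-fence-chain.h>, which is built on top of dma_fence_chain_walk().
 * The iterator holds a reference to the current node and drops it when
 * advancing, so a loop that runs to completion leaks nothing; breaking out
 * early would require an explicit dma_fence_put() on the iterator. The
 * function name below is hypothetical.
 */
#if 0
static unsigned int example_count_pending_points(struct dma_fence *head)
{
    struct dma_fence *iter;
    unsigned int pending = 0;

    dma_fence_chain_for_each(iter, head) {
        /* Look at the fence wrapped by each chain node. */
        if (!dma_fence_is_signaled(dma_fence_chain_contained(iter)))
            pending++;
    }
    return pending;
}
#endif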

/**
 * dma_fence_chain_find_seqno - find fence chain node by seqno
 * @pfence: pointer to the chain node where to start
 * @seqno: the sequence number to search for
 *
 * Advance the fence pointer to the chain node which will signal this sequence
 * number. If no sequence number is provided then this is a no-op.
 *
 * Returns -EINVAL if the fence is not a chain node or the sequence number has
 * not yet advanced far enough.
 */
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
{
    struct dma_fence_chain *chain;

    if (!seqno)
        return 0;

    chain = to_dma_fence_chain(*pfence);
    if (!chain || chain->base.seqno < seqno)
        return -EINVAL;

    dma_fence_chain_for_each(*pfence, &chain->base) {
        if ((*pfence)->context != chain->base.context ||
            to_dma_fence_chain(*pfence)->prev_seqno < seqno)
            break;
    }
    dma_fence_put(&chain->base);

    return 0;
}
EXPORT_SYMBOL(dma_fence_chain_find_seqno);
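
/*
 * Illustrative usage sketch, not part of the original file: a caller holding
 * a reference to the newest node of a timeline can resolve a specific point
 * with dma_fence_chain_find_seqno(). On success the passed-in reference is
 * replaced by a reference to the node which signals the requested point, or
 * by NULL if that point has already signaled; on error the passed-in
 * reference is left untouched. The function and variable names below are
 * hypothetical.
 */
#if 0
static int example_wait_for_timeline_point(struct dma_fence *timeline,
                                           uint64_t point)
{
    struct dma_fence *fence = dma_fence_get(timeline);
    long ret;

    ret = dma_fence_chain_find_seqno(&fence, point);
    if (ret) {
        /* Not a chain node, or the chain has not advanced this far yet. */
        dma_fence_put(fence);
        return ret;
    }

    /* NULL means the requested point is already signaled. */
    if (!fence)
        return 0;

    ret = dma_fence_wait(fence, true);
    dma_fence_put(fence);
    return ret < 0 ? ret : 0;
}
#endif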

static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
    return "dma_fence_chain";
}

static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
    return "unbound";
}

static void dma_fence_chain_irq_work(struct irq_work *work)
{
    struct dma_fence_chain *chain;

    chain = container_of(work, typeof(*chain), work);

    /* Try to rearm the callback */
    if (!dma_fence_chain_enable_signaling(&chain->base))
        /* Ok, we are done. No more unsignaled fences left */
        dma_fence_signal(&chain->base);
    dma_fence_put(&chain->base);
}

static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
    struct dma_fence_chain *chain;

    chain = container_of(cb, typeof(*chain), cb);
    init_irq_work(&chain->work, dma_fence_chain_irq_work);
    irq_work_queue(&chain->work);
    dma_fence_put(f);
}

static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
    struct dma_fence_chain *head = to_dma_fence_chain(fence);

    dma_fence_get(&head->base);
    dma_fence_chain_for_each(fence, &head->base) {
        struct dma_fence *f = dma_fence_chain_contained(fence);

        dma_fence_get(f);
        if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
            dma_fence_put(fence);
            return true;
        }
        dma_fence_put(f);
    }
    dma_fence_put(&head->base);
    return false;
}

static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
    dma_fence_chain_for_each(fence, fence) {
        struct dma_fence *f = dma_fence_chain_contained(fence);

        if (!dma_fence_is_signaled(f)) {
            dma_fence_put(fence);
            return false;
        }
    }

    return true;
}

static void dma_fence_chain_release(struct dma_fence *fence)
{
    struct dma_fence_chain *chain = to_dma_fence_chain(fence);
    struct dma_fence *prev;

    /* Manually unlink the chain as much as possible to avoid recursion
     * and potential stack overflow.
     */
    while ((prev = rcu_dereference_protected(chain->prev, true))) {
        struct dma_fence_chain *prev_chain;

        if (kref_read(&prev->refcount) > 1)
            break;

        prev_chain = to_dma_fence_chain(prev);
        if (!prev_chain)
            break;

        /* No need for atomic operations since we hold the last
         * reference to prev_chain.
         */
        chain->prev = prev_chain->prev;
        RCU_INIT_POINTER(prev_chain->prev, NULL);
        dma_fence_put(prev);
    }
    dma_fence_put(prev);

    dma_fence_put(chain->fence);
    dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_chain_ops = {
    .use_64bit_seqno = true,
    .get_driver_name = dma_fence_chain_get_driver_name,
    .get_timeline_name = dma_fence_chain_get_timeline_name,
    .enable_signaling = dma_fence_chain_enable_signaling,
    .signaled = dma_fence_chain_signaled,
    .release = dma_fence_chain_release,
};
EXPORT_SYMBOL(dma_fence_chain_ops);
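
/*
 * For reference, not part of the original file: the helpers in
 * <linux/dma-fence-chain.h> identify chain nodes by comparing against this
 * exported ops table, roughly along these lines (simplified; the exact
 * definitions may differ between kernel versions):
 */
#if 0
static inline bool dma_fence_is_chain(struct dma_fence *fence)
{
    return fence->ops == &dma_fence_chain_ops;
}

static inline struct dma_fence_chain *to_dma_fence_chain(struct dma_fence *fence)
{
    if (!fence || !dma_fence_is_chain(fence))
        return NULL;

    return container_of(fence, struct dma_fence_chain, base);
}
#endif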

/**
 * dma_fence_chain_init - initialize a fence chain
 * @chain: the chain node to initialize
 * @prev: the previous fence
 * @fence: the current fence
 * @seqno: the sequence number to use for the fence chain
 *
 * Initialize a new chain node and either start a new chain or add the node to
 * the existing chain of the previous fence.
 */
void dma_fence_chain_init(struct dma_fence_chain *chain,
              struct dma_fence *prev,
              struct dma_fence *fence,
              uint64_t seqno)
{
    struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
    uint64_t context;

    spin_lock_init(&chain->lock);
    rcu_assign_pointer(chain->prev, prev);
    chain->fence = fence;
    chain->prev_seqno = 0;

    /* Try to reuse the context of the previous chain node. */
    if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
        context = prev->context;
        chain->prev_seqno = prev->seqno;
    } else {
        context = dma_fence_context_alloc(1);
        /* Make sure that we always have a valid sequence number. */
        if (prev_chain)
            seqno = max(prev->seqno, seqno);
    }

    dma_fence_init(&chain->base, &dma_fence_chain_ops,
               &chain->lock, context, seqno);

    /*
     * Chaining dma_fence_chain containers together is only allowed through
     * the prev fence and not through the contained fence.
     *
     * The correct way of handling this is to flatten out the fence
     * structure into a dma_fence_array by the caller instead.
     */
    WARN_ON(dma_fence_is_chain(fence));
}
EXPORT_SYMBOL(dma_fence_chain_init);
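
/*
 * Illustrative usage sketch, not part of the original file: adding a new
 * point to a timeline. dma_fence_chain_init() takes over the references
 * passed in as @prev and @fence (they are released in
 * dma_fence_chain_release()), so the sketch grabs fresh references and lets
 * the caller keep its own. dma_fence_chain_alloc() is the allocation helper
 * from <linux/dma-fence-chain.h>; the function name below is hypothetical.
 */
#if 0
static struct dma_fence *example_timeline_add_point(struct dma_fence *prev,
                                                    struct dma_fence *fence,
                                                    uint64_t point)
{
    struct dma_fence_chain *chain = dma_fence_chain_alloc();

    if (!chain)
        return NULL;

    /* The new node consumes one reference to each of prev and fence. */
    dma_fence_chain_init(chain, dma_fence_get(prev), dma_fence_get(fence),
                         point);
    return &chain->base;
}
#endif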