// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-util: misc functions for dma_fence objects
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *  Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>

/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
{
    cursor->array = dma_fence_chain_contained(cursor->chain);
    cursor->index = 0;
    return dma_fence_array_first(cursor->array);
}

/**
 * dma_fence_unwrap_first - return the first fence from fence containers
 * @head: the entrypoint into the containers
 * @cursor: current position inside the containers
 *
 * Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
 * first fence.
 */
struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
                     struct dma_fence_unwrap *cursor)
{
    cursor->chain = dma_fence_get(head);
    return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);

/**
 * dma_fence_unwrap_next - return the next fence from the fence containers
 * @cursor: current position inside the containers
 *
 * Continues unwrapping the dma_fence_chain/dma_fence_array containers and
 * returns the next fence from them.
 */
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
    struct dma_fence *tmp;

    ++cursor->index;
    tmp = dma_fence_array_next(cursor->array, cursor->index);
    if (tmp)
        return tmp;

    cursor->chain = dma_fence_chain_walk(cursor->chain);
    return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
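
/*
 * Example (illustrative sketch, the function name below is hypothetical):
 * a caller typically walks every fence hidden inside chain/array containers
 * with the dma_fence_unwrap_for_each() macro from <linux/dma-fence-unwrap.h>,
 * which is built on dma_fence_unwrap_first() and dma_fence_unwrap_next().
 */
static unsigned int __maybe_unused example_count_pending(struct dma_fence *head)
{
    struct dma_fence_unwrap cursor;
    struct dma_fence *fence;
    unsigned int pending = 0;

    /* Mirrors the counting loop in __dma_fence_unwrap_merge() below */
    dma_fence_unwrap_for_each(fence, &cursor, head)
        if (!dma_fence_is_signaled(fence))
            ++pending;

    return pending;
}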

/* Implementation for the dma_fence_unwrap_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
                       struct dma_fence **fences,
                       struct dma_fence_unwrap *iter)
{
    struct dma_fence_array *result;
    struct dma_fence *tmp, **array;
    unsigned int i;
    size_t count;

    count = 0;
    for (i = 0; i < num_fences; ++i) {
        dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
            if (!dma_fence_is_signaled(tmp))
                ++count;
    }

    if (count == 0)
        return dma_fence_get_stub();

    array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
    if (!array)
        return NULL;

    /*
     * This trashes the input fence array and uses it as position for the
     * following merge loop. This works because the dma_fence_unwrap_merge()
     * wrapper macro creates this temporary array on the stack together
     * with the iterators.
     */
    for (i = 0; i < num_fences; ++i)
        fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);

    count = 0;
    do {
        unsigned int sel;

restart:
        tmp = NULL;
        for (i = 0; i < num_fences; ++i) {
            struct dma_fence *next;

            while (fences[i] && dma_fence_is_signaled(fences[i]))
                fences[i] = dma_fence_unwrap_next(&iter[i]);

            next = fences[i];
            if (!next)
                continue;

            /*
             * We can't guarantee that input fences are ordered by
             * context, but it is still quite likely when this
             * function is used multiple times. So attempt to order
             * the fences by context as we pass over them and merge
             * fences with the same context.
             */
            if (!tmp || tmp->context > next->context) {
                tmp = next;
                sel = i;

            } else if (tmp->context < next->context) {
                continue;

            } else if (dma_fence_is_later(tmp, next)) {
                fences[i] = dma_fence_unwrap_next(&iter[i]);
                goto restart;
            } else {
                fences[sel] = dma_fence_unwrap_next(&iter[sel]);
                goto restart;
            }
        }

        if (tmp) {
            array[count++] = dma_fence_get(tmp);
            fences[sel] = dma_fence_unwrap_next(&iter[sel]);
        }
    } while (tmp);

    if (count == 0) {
        tmp = dma_fence_get_stub();
        goto return_tmp;
    }

    if (count == 1) {
        tmp = array[0];
        goto return_tmp;
    }

    result = dma_fence_array_create(count, array,
                    dma_fence_context_alloc(1),
                    1, false);
    if (!result) {
        tmp = NULL;
        goto return_tmp;
    }
    return &result->base;

return_tmp:
    kfree(array);
    return tmp;
}
EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
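
/*
 * Example (illustrative sketch, the function name is hypothetical): merging
 * two possibly nested fences into one flat container, assuming the
 * dma_fence_unwrap_merge() wrapper macro declared in
 * <linux/dma-fence-unwrap.h>, which builds the fence and iterator arrays on
 * the stack and calls __dma_fence_unwrap_merge() above.
 */
static __maybe_unused struct dma_fence *
example_merge(struct dma_fence *a, struct dma_fence *b)
{
    struct dma_fence *merged;

    /*
     * The result is NULL on allocation failure, the signaled stub fence when
     * nothing is pending, or a new dma_fence_array otherwise; the caller
     * owns the returned reference and must drop it with dma_fence_put().
     */
    merged = dma_fence_unwrap_merge(a, b);

    return merged;
}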