#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

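/*
 * Helper: walk @list backwards, starting from the entry before @entry,
 * and unlock each buffer's reservation object. @entry itself is skipped.
 */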
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
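
/*
 * Illustrative error-path sketch (not part of this file): a driver's
 * submission path would typically back off like this when validation
 * fails after a successful reserve. "mydrv_validate_bos" is a
 * hypothetical driver helper, not a TTM function.
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *	if (ret)
 *		return ret;
 *	ret = mydrv_validate_bos(&list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 */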

/*
 * Reserve a list of buffer objects for validation.
 *
 * Reservation uses ww_mutex semantics: if a buffer is contended
 * (-EDEADLK), every reservation taken so far is dropped, the contended
 * buffer is reserved on the slow path, and the loop starts over. This
 * prevents deadlocks when several validators reserve the same buffers
 * in a different order.
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			/* Already reserved under this ticket: it is a
			 * duplicate, so park it on @dups and continue
			 * with the predecessor as the loop cursor.
			 */
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		/* Always reserve at least one fence slot. */
		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* We lost out: drop every reservation taken so far, then
		 * try to reserve only this buffer (on the slow path for
		 * -EDEADLK) and start over if that succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* Move the just-reserved buffer to the front of the list:
		 * iteration then resumes from the old head, retaking all
		 * the reservations dropped above without revisiting this
		 * entry and without any extra bookkeeping.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

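/*
 * Illustrative sketch of the @dups mechanism (hypothetical driver code,
 * not TTM API): with a ticket, reserving the same BO twice in one
 * submission yields -EALREADY, and the duplicate ttm_validate_buffer is
 * parked on @dups instead of failing the whole reservation.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
 *	if (ret)
 *		return ret;
 *	(entries left on &dups reference BOs already reserved via &list)
 */
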
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* If shared slots were requested, add @fence as a read
		 * fence, otherwise treat it as an exclusive write fence.
		 */
		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
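
/*
 * End-to-end sketch of the helper flow (illustrative only;
 * "mydrv_validate" and the fence it produces are hypothetical driver
 * pieces, not TTM API):
 *
 *	struct ttm_validate_buffer val = { .bo = bo, .num_shared = 1 };
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(list);
 *	int ret;
 *
 *	list_add(&val.head, &list);
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *	if (ret)
 *		return ret;
 *	ret = mydrv_validate(&list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 */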