#include <linux/uaccess.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)
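
/* Deferred free of a BO list: runs after an RCU grace period, so lockless
 * readers in amdgpu_bo_list_get() can never observe a freed list.
 */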
static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
						   rhead);

	mutex_destroy(&list->bo_list_mutex);
	kvfree(list);
}
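
/* kref release callback: drop the reference each entry holds on its BO,
 * then hand the list itself to RCU for deferred freeing.
 */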
static void amdgpu_bo_list_free(struct kref *ref)
{
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);
	struct amdgpu_bo_list_entry *e;

	amdgpu_bo_list_for_each_entry(e, list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		amdgpu_bo_unref(&bo);
	}

	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}
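
/* Build a BO list from a userspace-provided entry array. Each GEM handle is
 * resolved to a BO (taking a reference), userptr BOs are grouped at the tail
 * of the entry array, and the special GDS/GWS/OA objects are remembered.
 */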
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
			  struct drm_amdgpu_bo_list_entry *info,
			  size_t num_entries, struct amdgpu_bo_list **result)
{
	unsigned last_entry = 0, first_userptr = num_entries;
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo_list *list;
	uint64_t total_size = 0;
	size_t size;
	unsigned i;
	int r;

	/* Overflow check for the allocation size below */
	if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list))
			/ sizeof(struct amdgpu_bo_list_entry))
		return -EINVAL;

	size = sizeof(struct amdgpu_bo_list);
	size += num_entries * sizeof(struct amdgpu_bo_list_entry);
	list = kvmalloc(size, GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	kref_init(&list->refcount);
	list->gds_obj = NULL;
	list->gws_obj = NULL;
	list->oa_obj = NULL;

	array = amdgpu_bo_list_array_entry(list, 0);
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &bo->tbo;

		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			list->gds_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			list->gws_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			list->oa_obj = bo;

		total_size += amdgpu_bo_size(bo);
		trace_amdgpu_bo_list_set(list, bo);
	}

	list->first_userptr = first_userptr;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);

	mutex_init(&list->bo_list_mutex);
	*result = list;
	return 0;

error_free:
	for (i = 0; i < last_entry; ++i) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

		amdgpu_bo_unref(&bo);
	}
	for (i = first_userptr; i < num_entries; ++i) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

		amdgpu_bo_unref(&bo);
	}
	kvfree(list);
	return r;
}
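
/* Remove the list from the file-private handle IDR and drop the IDR's
 * reference; any remaining users keep the list alive until they call
 * amdgpu_bo_list_put().
 */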
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_free);
}
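
/* Look up a BO list by handle without taking bo_list_lock: the IDR is
 * walked under RCU, and kref_get_unless_zero() rejects a list whose last
 * reference is concurrently being dropped.
 */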
int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
		       struct amdgpu_bo_list **result)
{
	rcu_read_lock();
	*result = idr_find(&fpriv->bo_list_handles, id);

	if (*result && kref_get_unless_zero(&(*result)->refcount)) {
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();
	return -ENOENT;
}
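
/* Fill @validated with the entries of @list, bucket-sorted by priority.
 * Only BOs without a parent are added; each entry's user_pages pointer is
 * reset along the way.
 */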
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	struct amdgpu_bo_list_entry *e;
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * which are equal.
	 *
	 * BOs are added to the end of each bucket.
	 */
	amdgpu_bo_list_for_each_entry(e, list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		unsigned priority = e->priority;

		if (!bo->parent)
			list_add_tail(&e->tv.head, &bucket[priority]);

		e->user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}
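
/* Drop a reference obtained from amdgpu_bo_list_get() or list creation. */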
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	kref_put(&list->refcount, amdgpu_bo_list_free);
}
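
/* Copy the BO list entries from userspace. If the userspace entry size
 * matches the kernel's, the whole array is copied in one go; otherwise each
 * entry is copied individually and truncated or zero-padded, keeping the
 * ioctl compatible with both older and newer userspace struct layouts.
 */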
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
				      struct drm_amdgpu_bo_list_entry **info_param)
{
	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == in->bo_info_size)) {
		unsigned long bytes = in->bo_number * in->bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;
	} else {
		unsigned long bytes = min(in->bo_info_size, info_size);
		unsigned i;

		memset(info, 0, in->bo_number * info_size);
		for (i = 0; i < in->bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += in->bo_info_size;
		}
	}

	*info_param = info;
	return 0;

error_free:
	kvfree(info);
	return r;
}
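
/* Ioctl entry point: create, destroy or update a BO list handle for the
 * calling file descriptor.
 */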
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list, *old;
	int r;

	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
	if (r)
		return r;

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
		mutex_unlock(&fpriv->bo_list_lock);
		if (r < 0)
			goto error_put_list;

		handle = r;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		old = idr_replace(&fpriv->bo_list_handles, list, handle);
		mutex_unlock(&fpriv->bo_list_lock);

		if (IS_ERR(old)) {
			r = PTR_ERR(old);
			goto error_put_list;
		}

		amdgpu_bo_list_put(old);
		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_put_list:
	amdgpu_bo_list_put(list);

error_free:
	kvfree(info);
	return r;
}