0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045 #define pr_fmt(fmt) "[TTM] " fmt
0046
0047 #include <linux/list.h>
0048 #include <linux/spinlock.h>
0049 #include <linux/slab.h>
0050 #include <linux/atomic.h>
0051 #include <linux/module.h>
0052 #include "ttm_object.h"
0053 #include "vmwgfx_drv.h"
0054
0055 MODULE_IMPORT_NS(DMA_BUF);
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
/**
 * struct ttm_object_file - Per-open-file bookkeeping for ttm objects.
 *
 * @tdev: The ttm object device this file belongs to.
 * @lock: Protects @ref_list and updates of @ref_hash.
 * @ref_list: List of all ttm_ref_object entries held by this file.
 * @ref_hash: Hash table over the same ref objects, keyed by base-object
 * handle; read under RCU for lockless lookup.
 * @refcount: Keeps this structure alive while base objects still point
 * at it; freed by ttm_object_file_destroy().
 */
struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	struct vmwgfx_open_hash ref_hash;
	struct kref refcount;
};
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
/**
 * struct ttm_object_device - Per-device ttm object bookkeeping.
 *
 * @object_lock: Protects handle allocation/removal in @idr.
 * @object_hash: Device-wide object hash; only created/removed in this
 * file — presumably consumed elsewhere in the driver (verify).
 * @object_count: Object counter; only initialized in this file.
 * @ops: Copy of the driver's dma_buf_ops with the release hook
 * interposed (see ttm_object_device_init()).
 * @dmabuf_release: The driver's original dma_buf release callback,
 * chained to from ttm_prime_dmabuf_release().
 * @idr: Maps object handles to struct ttm_base_object; looked up under
 * RCU in ttm_base_object_lookup_for_ref().
 */
struct ttm_object_device {
	spinlock_t object_lock;
	struct vmwgfx_open_hash object_hash;
	atomic_t object_count;
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	struct idr idr;
};
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
/**
 * struct ttm_ref_object - One file's usage reference on a base object.
 *
 * @rcu_head: For deferred freeing via kfree_rcu(), so RCU hash lookups
 * never dereference freed memory.
 * @hash: Entry in the owning file's ref_hash, keyed by @obj's handle.
 * @head: Entry on the owning file's ref_list.
 * @kref: Number of usage references this file holds; the ref object
 * itself pins exactly one refcount on @obj (taken in
 * ttm_ref_object_add()).
 * @obj: The referenced base object.
 * @tfile: The file owning this reference.
 */
struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct vmwgfx_hash_item hash;
	struct list_head head;
	struct kref kref;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};
0131
0132 static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
0133
0134 static inline struct ttm_object_file *
0135 ttm_object_file_ref(struct ttm_object_file *tfile)
0136 {
0137 kref_get(&tfile->refcount);
0138 return tfile;
0139 }
0140
0141 static void ttm_object_file_destroy(struct kref *kref)
0142 {
0143 struct ttm_object_file *tfile =
0144 container_of(kref, struct ttm_object_file, refcount);
0145
0146 kfree(tfile);
0147 }
0148
0149
0150 static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
0151 {
0152 struct ttm_object_file *tfile = *p_tfile;
0153
0154 *p_tfile = NULL;
0155 kref_put(&tfile->refcount, ttm_object_file_destroy);
0156 }
0157
0158
/**
 * ttm_base_object_init - Initialize and register a base object.
 * @tfile: The file creating the object.
 * @base: Caller-allocated object to initialize.
 * @shareable: Whether other files may acquire references on it.
 * @object_type: The caller-visible object type.
 * @refcount_release: Destructor run when the last reference drops.
 *
 * Allocates a handle in the device idr, then registers a per-file ref
 * object for @tfile. The initial reference from kref_init() is dropped
 * at the end, because ttm_ref_object_add() took its own base reference:
 * from then on the object lives exactly as long as the file's ref does.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	idr_preload(GFP_KERNEL);
	spin_lock(&tdev->object_lock);
	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
	spin_unlock(&tdev->object_lock);
	idr_preload_end();
	/*
	 * NOTE(review): on the error paths below, the ttm_object_file_ref()
	 * taken above is not dropped here — confirm callers' failure
	 * cleanup compensates, or a tfile reference may leak.
	 */
	if (ret < 0)
		return ret;

	base->handle = ret;
	ret = ttm_ref_object_add(tfile, base, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;

	/* Drop the kref_init() reference; the ref object holds one. */
	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);
	return ret;
}
0195
/*
 * kref release callback for base objects: unpublish the handle from the
 * device idr, drop the reference on the owning file, and invoke the
 * creator's destructor.
 *
 * NOTE(review): idr lookups happen under rcu_read_lock() (see
 * ttm_base_object_lookup_for_ref()), so @refcount_release is expected
 * to free the object memory in an RCU-safe manner — confirm with the
 * callers' destructors.
 */
static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
		container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}
0216
0217 void ttm_base_object_unref(struct ttm_base_object **p_base)
0218 {
0219 struct ttm_base_object *base = *p_base;
0220
0221 *p_base = NULL;
0222
0223 kref_put(&base->refcount, ttm_release_base);
0224 }
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
/**
 * ttm_base_object_noref_lookup - Look up a base object without taking
 * a reference.
 * @tfile: The file performing the lookup.
 * @key: The object handle.
 *
 * On success this returns the base object WITHOUT taking a refcount,
 * and with the RCU read lock still held: the __release(RCU) below only
 * tells sparse that ending the read-side section is now the caller's
 * responsibility (presumably via a matching release helper — verify the
 * caller contract). On failure the RCU lock is dropped here.
 *
 * Return: The base object, or NULL if not found.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
	struct vmwgfx_hash_item *hash;
	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
	int ret;

	rcu_read_lock();
	ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
	if (ret) {
		rcu_read_unlock();
		return NULL;
	}

	/* Success: leave the RCU read lock held for the caller. */
	__release(RCU);
	return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
0260
0261 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
0262 uint32_t key)
0263 {
0264 struct ttm_base_object *base = NULL;
0265 struct vmwgfx_hash_item *hash;
0266 struct vmwgfx_open_hash *ht = &tfile->ref_hash;
0267 int ret;
0268
0269 rcu_read_lock();
0270 ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
0271
0272 if (likely(ret == 0)) {
0273 base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
0274 if (!kref_get_unless_zero(&base->refcount))
0275 base = NULL;
0276 }
0277 rcu_read_unlock();
0278
0279 return base;
0280 }
0281
/**
 * ttm_base_object_lookup_for_ref - Device-wide lookup of a base object.
 * @tdev: The object device.
 * @key: The object handle.
 *
 * Looks @key up directly in the device idr — no per-file ref is needed
 * — and takes a reference unless the object is already on its way to
 * destruction.
 *
 * Return: A referenced base object, or NULL.
 */
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base;

	rcu_read_lock();
	base = idr_find(&tdev->idr, key);

	if (base && !kref_get_unless_zero(&base->refcount))
		base = NULL;
	rcu_read_unlock();

	return base;
}
0296
/**
 * ttm_ref_object_add - Add or bump a per-file usage ref on a base object.
 * @tfile: The file that should hold the reference.
 * @base: The base object to reference.
 * @existed: Optional out-parameter; set to false only when a brand-new
 * ref object was created, left true when an existing one was reused.
 * @require_existed: If true, never create a new ref object — fail with
 * -EPERM when none exists.
 *
 * Fast path: find the existing ref object under RCU and take a kref on
 * it. If the ref is absent, or found but concurrently dying (its kref
 * already zero), a fresh ref object is allocated and inserted; the
 * insert can race with another thread inserting the same key, in which
 * case vmwgfx_ht_insert_item_rcu() returns -EINVAL and the outer loop
 * retries from the lookup.
 *
 * Return: 0 on success, -EPERM if sharing rules or @require_existed
 * forbid the operation, -ENOMEM on allocation failure.
 */
int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       bool *existed,
		       bool require_existed)
{
	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
	struct ttm_ref_object *ref;
	struct vmwgfx_hash_item *hash;
	int ret = -EINVAL;

	/* Only the owning file may reference a non-shareable object. */
	if (base->tfile != tfile && !base->shareable)
		return -EPERM;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
			/* Existing ref is dying; fall through and retry. */
		}

		rcu_read_unlock();
		if (require_existed)
			return -EPERM;

		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			return -ENOMEM;
		}

		ref->hash.key = base->handle;
		ref->obj = base;
		ref->tfile = tfile;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			/* The new ref object pins one base-object ref. */
			kref_get(&base->refcount);
			spin_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		spin_unlock(&tfile->lock);
		/* Insertion may only fail because the key raced in. */
		BUG_ON(ret != -EINVAL);

		kfree(ref);
	}

	return ret;
}
0359
/*
 * kref release callback for ref objects. Called with @tfile->lock held;
 * the lock is dropped around the base-object unref (whose destructor
 * chain may sleep) and reacquired before returning — hence the sparse
 * __releases/__acquires annotations on the declaration.
 *
 * The ref object itself is freed via kfree_rcu() so that concurrent
 * RCU hash lookups never see freed memory.
 */
static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_object_file *tfile = ref->tfile;
	struct vmwgfx_open_hash *ht;

	ht = &tfile->ref_hash;
	(void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	ttm_base_object_unref(&ref->obj);
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}
0377
0378 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
0379 unsigned long key)
0380 {
0381 struct vmwgfx_open_hash *ht = &tfile->ref_hash;
0382 struct ttm_ref_object *ref;
0383 struct vmwgfx_hash_item *hash;
0384 int ret;
0385
0386 spin_lock(&tfile->lock);
0387 ret = vmwgfx_ht_find_item(ht, key, &hash);
0388 if (unlikely(ret != 0)) {
0389 spin_unlock(&tfile->lock);
0390 return -EINVAL;
0391 }
0392 ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
0393 kref_put(&ref->kref, ttm_ref_object_release);
0394 spin_unlock(&tfile->lock);
0395 return 0;
0396 }
0397
/**
 * ttm_object_file_release - Tear down a ttm_object_file on file close.
 * @p_tfile: Pointer to the caller's tfile pointer; cleared here.
 *
 * Forcibly releases every remaining ref object held by the file —
 * calling ttm_ref_object_release() directly, bypassing each ref's
 * individual kref count — then removes the ref hash and drops the
 * file's own refcount. Because ttm_ref_object_release() drops and
 * retakes @tfile->lock, the list head is re-read on every iteration
 * instead of using a list iterator.
 */
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		/* Unconditional release: this file is going away. */
		ttm_ref_object_release(&ref->kref);
	}

	spin_unlock(&tfile->lock);
	vmwgfx_ht_remove(&tfile->ref_hash);

	ttm_object_file_unref(&tfile);
}
0423
0424 struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
0425 unsigned int hash_order)
0426 {
0427 struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
0428 int ret;
0429
0430 if (unlikely(tfile == NULL))
0431 return NULL;
0432
0433 spin_lock_init(&tfile->lock);
0434 tfile->tdev = tdev;
0435 kref_init(&tfile->refcount);
0436 INIT_LIST_HEAD(&tfile->ref_list);
0437
0438 ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
0439 if (ret)
0440 goto out_err;
0441
0442 return tfile;
0443 out_err:
0444 vmwgfx_ht_remove(&tfile->ref_hash);
0445
0446 kfree(tfile);
0447
0448 return NULL;
0449 }
0450
/**
 * ttm_object_device_init - Allocate and initialize a ttm_object_device.
 * @hash_order: Order of the device-wide object hash table.
 * @ops: dma_buf_ops for prime objects exported through this device.
 *
 * Copies @ops and interposes ttm_prime_dmabuf_release() as the release
 * hook, saving the driver's original callback so it can be chained.
 *
 * Return: The new device, or NULL on failure.
 */
struct ttm_object_device *
ttm_object_device_init(unsigned int hash_order,
		       const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	spin_lock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
	if (ret != 0)
		goto out_no_object_hash;

	/*
	 * Handles are allocated starting above VMWGFX_NUM_MOB —
	 * presumably to keep the low handle range reserved for MOB ids;
	 * confirm against the id users elsewhere in the driver.
	 */
	idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	return tdev;

out_no_object_hash:
	kfree(tdev);
	return NULL;
}
0485
0486 void ttm_object_device_release(struct ttm_object_device **p_tdev)
0487 {
0488 struct ttm_object_device *tdev = *p_tdev;
0489
0490 *p_tdev = NULL;
0491
0492 WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
0493 idr_destroy(&tdev->idr);
0494 vmwgfx_ht_remove(&tdev->object_hash);
0495
0496 kfree(tdev);
0497 }
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
/**
 * get_dma_buf_unless_doomed - Obtain a dma_buf reference if possible.
 * @dmabuf: Non-NULL dma_buf to reference.
 *
 * Bumps the underlying file's refcount only if it has not already
 * dropped to zero — i.e. the file is not already being torn down.
 * This lets a cached, unreferenced dma_buf pointer be revalidated.
 *
 * Return: true if a reference was obtained.
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
/**
 * ttm_prime_refcount_release - Base-object destructor for prime objects.
 * @p_base: Pointer to the base object pointer; cleared here.
 *
 * By the time the last base-object reference goes away, any exported
 * dma_buf must itself be gone (its release callback drops a base ref
 * and clears prime->dma_buf) — hence the BUG_ON. Destroys the mutex
 * and chains to the creator's own destructor.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
/**
 * ttm_prime_dmabuf_release - Release callback for exported dma_bufs.
 * @dma_buf: The dma_buf being destroyed.
 *
 * Chains to the driver's original release callback, detaches the
 * prime object's cached pointer if it still refers to this dma_buf,
 * and drops the base-object reference the dma_buf held (handed over
 * in ttm_prime_handle_to_fd()).
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_base_object_unref(&base);
}
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577 int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
0578 int fd, u32 *handle)
0579 {
0580 struct ttm_object_device *tdev = tfile->tdev;
0581 struct dma_buf *dma_buf;
0582 struct ttm_prime_object *prime;
0583 struct ttm_base_object *base;
0584 int ret;
0585
0586 dma_buf = dma_buf_get(fd);
0587 if (IS_ERR(dma_buf))
0588 return PTR_ERR(dma_buf);
0589
0590 if (dma_buf->ops != &tdev->ops)
0591 return -ENOSYS;
0592
0593 prime = (struct ttm_prime_object *) dma_buf->priv;
0594 base = &prime->base;
0595 *handle = base->handle;
0596 ret = ttm_ref_object_add(tfile, base, NULL, false);
0597
0598 dma_buf_put(dma_buf);
0599
0600 return ret;
0601 }
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
/**
 * ttm_prime_handle_to_fd - Export a prime object handle as a dma_buf fd.
 * @tfile: The file performing the export.
 * @handle: Handle of the prime object.
 * @flags: fd flags passed to dma_buf export / dma_buf_fd().
 * @prime_fd: The returned file descriptor.
 *
 * Reuses a previously exported dma_buf when one is still alive;
 * otherwise exports a fresh one and caches it in prime->dma_buf. A
 * freshly exported dma_buf takes over the base-object reference
 * obtained by the lookup below — ttm_prime_dmabuf_release() drops it
 * when the dma_buf dies.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	/* Export anew when there is no cached dma_buf or it is dying. */
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * The new dma_buf now owns the base-object reference
		 * taken by the lookup above; clearing @base skips the
		 * final unref at out_unref.
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694 int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
0695 struct ttm_prime_object *prime, bool shareable,
0696 enum ttm_object_type type,
0697 void (*refcount_release) (struct ttm_base_object **))
0698 {
0699 mutex_init(&prime->mutex);
0700 prime->size = PAGE_ALIGN(size);
0701 prime->real_type = type;
0702 prime->dma_buf = NULL;
0703 prime->refcount_release = refcount_release;
0704 return ttm_base_object_init(tfile, &prime->base, shareable,
0705 ttm_prime_type,
0706 ttm_prime_refcount_release);
0707 }