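/*
 * omap_gem.c -- OMAP DRM GEM Object Management
 */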

#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
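
/* note: we use upper 8 bits of flags for driver-internal flags: */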
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/** protects pin_cnt, block, pages, dma_addrs and vaddr */
	struct mutex lock;
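
	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag
	 *   set) if they are physically contiguous
	 *
	 * - buffers mapped through the TILER when pin_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through IOMMU.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
	 * the DMA address must be accessed through omap_gem_pin() to ensure
	 * that the mapping won't disappear unexpectedly, and references must
	 * be released with omap_gem_unpin().
	 */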
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	refcount_t pin_cnt;

	/**
	 * sgt contains the scatter gather table for the buffer, either
	 * provided at import time for dmabuf-backed buffers or built on
	 * demand when the buffer is exported.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
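
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */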
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(slot width in pixels) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
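
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */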
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}
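
/* -----------------------------------------------------------------------------
 * Eviction
 */

/* Evict a buffer from usergart, if it is mapped there */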
static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;

		/* if stride > than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict all usergart entries that currently map this buffer */
static void omap_gem_evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				omap_gem_evict_entry(obj, fmt, entry);
		}
	}
}
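
/* -----------------------------------------------------------------------------
 * Page Management
 */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */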
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC | OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
						0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					 "%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						       PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	lockdep_assert_held(&omap_obj->lock);

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				   omap_obj->width, omap_obj->height);
	}

	return size;
}
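
/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */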
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, err, slots;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
	 * height is 64, then the height is actually 64 pages.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be
	 * accounted for when calculating the range covered by the slot.
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;

		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
	       sizeof(struct page *) * slots);
	memset(pages + slots, 0,
	       sizeof(struct page *) * (n - slots));

	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (err) {
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
			break;
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return ret;
}
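
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */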
static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int err;
	vm_fault_t ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
	if (err) {
		ret = vmf_error(err);
		goto fail;
	}

	/* Dispatch to the appropriate handler for tiled vs. linear buffers: */
	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = omap_gem_fault_2d(obj, vma, vmf);
	else
		ret = omap_gem_fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&omap_obj->lock);
	return ret;
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER.  But these are
		 * allocated write-combined.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
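
/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */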
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
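
/**
 * omap_gem_dumb_map_offset - create an offset for a dumb buffer
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory map offset placeholder
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.
 */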
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
#endif
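
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */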
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_attach_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_gem_is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
			       PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (omap_gem_is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					    PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					 __func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}

static int omap_gem_pin_tiler(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct tiler_block *block;
	int ret;

	BUG_ON(omap_obj->block);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
					 PAGE_SIZE);
	} else {
		block = tiler_reserve_1d(obj->size);
	}

	if (IS_ERR(block)) {
		ret = PTR_ERR(block);
		dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
		goto fail;
	}

	/* TODO: enable async refill.. */
	ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
	if (ret) {
		tiler_release(block);
		dev_err(obj->dev->dev, "could not pin: %d\n", ret);
		goto fail;
	}

	omap_obj->dma_addr = tiler_ssptr(block);
	omap_obj->block = block;

	DBG("got dma address: %pad", &omap_obj->dma_addr);

fail:
	return ret;
}
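
/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed.
 * The pin is released by omap_gem_unpin().
 *
 * Return: 0 on success or a negative error code otherwise.
 */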
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (!omap_gem_is_contiguous(omap_obj)) {
		if (refcount_read(&omap_obj->pin_cnt) == 0) {

			refcount_set(&omap_obj->pin_cnt, 1);

			ret = omap_gem_attach_pages(obj);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_SCANOUT) {
				if (priv->has_dmm) {
					ret = omap_gem_pin_tiler(obj);
					if (ret)
						goto fail;
				}
			}
		} else {
			refcount_inc(&omap_obj->pin_cnt);
		}
	}

	if (dma_addr)
		*dma_addr = omap_obj->dma_addr;

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
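
/*
 * Typical usage (a sketch, not code from this driver): a display update
 * path pins the buffer to obtain a device-visible address, programs the
 * hardware, and unpins once the buffer is no longer scanned out:
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *
 *	if (ret)
 *		return ret;
 *	... program DMA/scanout with dma_addr ...
 *	omap_gem_unpin(obj);
 */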
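
/**
 * omap_gem_unpin_locked() - Unpin a GEM object in memory
 * @obj: the GEM object
 *
 * omap_gem_unpin() without locking.
 */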
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	if (omap_gem_is_contiguous(omap_obj))
		return;

	if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
		if (omap_obj->sgt) {
			sg_free_table(omap_obj->sgt);
			kfree(omap_obj->sgt);
			omap_obj->sgt = NULL;
		}

		if (!(omap_obj->flags & OMAP_BO_SCANOUT))
			return;

		if (priv->has_dmm) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}
}
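
/**
 * omap_gem_unpin() - Unpin a GEM object in memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of unpins matches the number of pins.
 */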
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);
	omap_gem_unpin_locked(obj);
	mutex_unlock(&omap_obj->lock);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&omap_obj->lock);

	if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
	    (omap_obj->flags & OMAP_BO_TILED_MASK)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}

	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}
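
/*
 * Get pages backing the object. If 'remap' is true, attach backing pages
 * first when they are not allocated yet. Fails with -ENOMEM when the object
 * has no pages attached.
 */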
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (remap) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto unlock;
	}

	if (!omap_obj->pages) {
		ret = -ENOMEM;
		goto unlock;
	}

	*pages = omap_obj->pages;

unlock:
	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	dma_addr_t addr;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int count, len, stride, i;
	int ret;

	ret = omap_gem_pin(obj, &addr);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&omap_obj->lock);

	sgt = omap_obj->sgt;
	if (sgt)
		goto out;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	if (addr) {
		if (omap_obj->flags & OMAP_BO_TILED_MASK) {
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);

			len = omap_obj->width << (int)fmt;
			count = omap_obj->height;
			stride = tiler_stride(fmt, 0);
		} else {
			len = obj->size;
			count = 1;
			stride = 0;
		}
	} else {
		count = obj->size >> PAGE_SHIFT;
	}

	ret = sg_alloc_table(sgt, count, GFP_KERNEL);
	if (ret)
		goto err_free;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	if (addr) {
		for_each_sg(sgt->sgl, sg, count, i) {
			sg_set_page(sg, phys_to_page(addr), len,
				    offset_in_page(addr));
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = len;

			addr += stride;
		}
	} else {
		for_each_sg(sgt->sgl, sg, count, i) {
			sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
			sg_dma_address(sg) = omap_obj->dma_addrs[i];
			sg_dma_len(sg) = PAGE_SIZE;
		}
	}

	omap_obj->sgt = sgt;
out:
	mutex_unlock(&omap_obj->lock);
	return sgt;

err_free:
	kfree(sgt);
err_unpin:
	mutex_unlock(&omap_obj->lock);
	omap_gem_unpin(obj);
	return ERR_PTR(ret);
}

void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (WARN_ON(omap_obj->sgt != sgt))
		return;

	omap_gem_unpin(obj);
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	void *vaddr;
	int ret;

	mutex_lock(&omap_obj->lock);

	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			vaddr = ERR_PTR(ret);
			goto unlock;
		}

		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				       VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	vaddr = omap_obj->vaddr;

unlock:
	mutex_unlock(&omap_obj->lock);
	return vaddr;
}
#endif
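
/* -----------------------------------------------------------------------------
 * Power Management
 */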

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	mutex_lock(&omap_obj->lock);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
		   omap_obj->flags, obj->name, kref_read(&obj->refcount),
		   off, &omap_obj->dma_addr,
		   refcount_read(&omap_obj->pin_cnt),
		   omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;

			seq_printf(m, " (%dx%d, %dx%d)",
				   area->p0.x, area->p0.y,
				   area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	mutex_unlock(&omap_obj->lock);

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;

		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
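
/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */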

static void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	omap_gem_evict(obj);

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/*
	 * We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take the omap_obj->lock to call
	 * omap_gem_detach_pages(). This should hardly make any difference as
	 * there can't be any lock contention.
	 */
	mutex_lock(&omap_obj->lock);

	/* The object should not be pinned. */
	WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	mutex_unlock(&omap_obj->lock);

	drm_gem_object_release(obj);

	mutex_destroy(&omap_obj->lock);

	kfree(omap_obj);
}

static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;

	switch (flags & OMAP_BO_CACHE_MASK) {
	case OMAP_BO_CACHED:
	case OMAP_BO_WC:
	case OMAP_BO_CACHE_MASK:
		break;

	default:
		return false;
	}

	if (flags & OMAP_BO_TILED_MASK) {
		if (!priv->usergart)
			return false;

		switch (flags & OMAP_BO_TILED_MASK) {
		case OMAP_BO_TILED_8:
		case OMAP_BO_TILED_16:
		case OMAP_BO_TILED_32:
			break;

		default:
			return false;
		}
	}

	return true;
}

static const struct vm_operations_struct omap_gem_vm_ops = {
	.fault = omap_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs omap_gem_object_funcs = {
	.free = omap_gem_free_object,
	.export = omap_gem_prime_export,
	.vm_ops = &omap_gem_vm_ops,
};

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (!omap_gem_validate_flags(dev, flags))
		return NULL;

	/* Compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * If we don't have DMM, we must allocate scanout buffers
		 * from contiguous DMA memory.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;
	mutex_init(&omap_obj->lock);

	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	obj->funcs = &omap_gem_object_funcs;

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct page **pages;
		unsigned int npages;
		int ret;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;
		ret = drm_prime_sg_to_page_array(sgt, pages, npages);
		if (ret) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&omap_obj->lock);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return 0;
}
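
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */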
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		u16 h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
			    &entry->dma_addr,
			    usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}
1529
1530 void omap_gem_deinit(struct drm_device *dev)
1531 {
1532 struct omap_drm_private *priv = dev->dev_private;
1533
1534
1535
1536
1537 kfree(priv->usergart);
1538 }