/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

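/*
 * Move the pages back onto the appropriate LRU lists, then release the
 * pagevec, dropping the reference we hold on each page.
 */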
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
}

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	const unsigned long page_count = size / PAGE_SIZE;
	unsigned long i;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0;
	gfp_t noreclaim;
	int ret;

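	/*
	 * If there is no chance of allocating enough pages for the whole
	 * object, bail early.
	 */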
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL))
		return -ENOMEM;

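	/*
	 * The pages are pinned from this point until we release them, so
	 * mark the mapping unevictable. Start with a gfp mask that neither
	 * enters reclaim nor warns, so the first allocation attempt fails
	 * silently and we can escalate via our own shrinker below.
	 */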
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

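			/*
			 * We have tried hard to get memory by reaping our own
			 * buffers. Once the shrink phases are exhausted, hand
			 * the allocation over to the real VM below and accept
			 * failure rather than triggering the OOM killer.
			 */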
			if (!*s) {
				/* Reclaim and warn, but no OOM. */
				gfp = mapping_gfp_mask(mapping);

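				/*
				 * Our buffers are always dirty and direct
				 * reclaim does not reliably write them back,
				 * so retry the allocation aggressively, but
				 * still fail it rather than invoke the OOM
				 * killer: that is what __GFP_RETRY_MAYFAIL
				 * gives us.
				 */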
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm DMA32 workaround holds. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg)
		sg_mark_end(sg);

	/* Trim any unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

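	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the
	 * usual ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of
	 * aperture space, so translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */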
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been
	 * in a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
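		/*
		 * DMA remapping failed? One possible cause is that it could
		 * not reserve enough large entries, so retry asking for
		 * PAGE_SIZE chunks instead.
		 */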
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * As in shmem_sg_alloc_table(), shmemfs reports ENOSPC when it runs
	 * out of space, which we translate back to our usual ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
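	/*
	 * Return as much of the backing storage as possible to the system:
	 * instruct shmemfs to drop all of its backing pages right now and
	 * mark the object as purged.
	 */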
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

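	/*
	 * Leave any CPU mmapings intact and simply begin writeback on each
	 * dirty, unmapped page so that it is aged and paged out as normal.
	 */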
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);

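	/*
	 * On non-LLC platforms, mark the cache as dirty so that a
	 * flush-on-acquire is forced if these pages are ever swapped
	 * back in; the async flush path cannot be relied upon here.
	 */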
	if (!HAS_LLC(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* The caller has already validated the user arguments. */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

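	/*
	 * Before we instantiate/pin the backing store for our use, we can
	 * prepopulate the shmemfs filp efficiently using a write into the
	 * pagecache. We avoid the penalty of instantiating all the pages,
	 * important if the user is just writing to a few and never uses
	 * the object on the GPU, and using a direct write into shmemfs
	 * avoids the cost of retrieving a page (either swapin or
	 * clearing-before-use) before it is overwritten.
	 */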
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

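	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, so we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue;
	 * that is userspace's prerogative!
	 */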
	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/*
		 * Prefault the user pages so the atomic copy below is
		 * unlikely to hit an unresolved fault.
		 */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = aops->write_begin(obj->base.filp, mapping, offset, len,
					&page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = aops->write_end(obj->base.filp, mapping, offset, len,
				      len - unwritten, page, data);
		if (err < 0)
			return err;

		/*
		 * Bail on a partial copy rather than retrying; -ENODEV lets
		 * the caller fall back to the ordinary pwrite path.
		 */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t offset,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
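		/*
		 * On LLC platforms the GPU shares the last-level cache with
		 * the CPU, so GPU access through this cache level is coherent
		 * with the CPU and needs no manual clflushing. Display
		 * scanout is the exception and is kept uncached.
		 */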
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Create a shmem-backed object prepopulated with the supplied data. */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	const struct address_space_operations *aops;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	aops = file->f_mapping->a_ops;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = aops->write_begin(file, file->f_mapping, offset, len,
					&page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = aops->write_end(file, file->f_mapping, offset, len, len,
				      page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_init(mem->i915);
	intel_memory_region_set_name(mem, "system");

	return 0;
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}