/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

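/* Bookkeeping for each mmap'ed VMA, kept on drm_device::vmalist. */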
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

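/*
 * Compute the page protection for an I/O map: registers are mapped uncached
 * unless write-combining was explicitly requested, while frame buffers get
 * write-combining where the architecture supports it.
 */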
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__) || defined(__loongarch__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

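/*
 * Compute the page protection for DMA-backed mappings. On non-cache-coherent
 * PowerPC the pages must be mapped uncached write-combining; everywhere else
 * the default protection is used. The map_type argument is currently unused.
 */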
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}

/*
 * \c fault method for AGP virtual memory.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c fault method for shared virtual memory.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/*
 * \c close method for shared virtual memory.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dma_free_coherent(dev->dev,
						  map->size,
						  map->handle,
						  map->offset);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA memory virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

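/*
 * \c open method for shared virtual memory.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist. The caller must hold dev->struct_mutex.
 */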
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

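/*
 * Remove the \p vma bookkeeping entry from drm_device::vmalist. The caller
 * must hold dev->struct_mutex.
 */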
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/*
 * \c close method for all virtual memory types.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

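/*
 * Architecture-specific register offset: on Alpha the bus' dense memory base
 * must be added to a map's offset before remapping; elsewhere it is zero.
 */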
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/*
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a
 * DMA area, so calls mmap_dma(). Otherwise searches the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally adds the VMA to the bookkeeping list via
 * drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from
			 * the CPU, so for memory of type DRM_AGP, we'll deal with
			 * sorting out the real physical pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		fallthrough;	/* to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end,
			  (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		fallthrough;	/* to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif