/*
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
#include <uapi/asm/mman.h>
#include <drm/drm_vma_manager.h>
#endif

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

bool drm_dev_needs_global_mutex(struct drm_device *dev)
{
	/*
	 * Legacy drivers rely on all kinds of BKL locking semantics, don't
	 * bother. They also still need BKL locking for their ioctls, so better
	 * safe than sorry.
	 */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		return true;

	/*
	 * The deprecated ->load callback must be called after the driver is
	 * already registered. This means such drivers rely on the BKL to make
	 * sure an open can't proceed until the driver is actually fully set up.
	 * Similar hilarity holds for the unload callback.
	 */
	if (dev->driver->load || dev->driver->unload)
		return true;

	/*
	 * Drivers with the lastclose callback assume that it's synchronized
	 * against concurrent opens, which again needs the BKL. The proper fix
	 * is to use the drm_client infrastructure with proper locking for each
	 * client.
	 */
	if (dev->driver->lastclose)
		return true;

	return false;
}

/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
 * Drivers which implement private ioctls that require 32/64 bit compatibility
 * support must provide their own &file_operations.compat_ioctl handler that
 * processes private ioctls and calls drm_compat_ioctl() for core ioctls.
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used to send vblank events
 * and page flip completions by the KMS API, but drivers can also use them for
 * their own needs, e.g. to signal completion of rendering.
 *
 * For the driver-side event interface see drm_event_reserve_init() and
 * drm_send_event() as the main starting points.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory; for GEM-based drivers it is drm_gem_mmap(). No other file
 * operations are supported by the DRM userspace API. An example
 * &file_operations is sketched right below.
 */
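
/*
 * A minimal sketch of such a &struct file_operations for a GEM-based driver.
 * "example_drm_fops" is a hypothetical name; the .mmap handler varies with
 * the driver's memory manager:
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl,
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = noop_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 *
 * GEM-based drivers can instead use the DEFINE_DRM_GEM_FOPS() convenience
 * macro, which expands to an equivalent structure.
 */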

/**
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context. It is not linked into any context and
 * can be used by the caller freely. Note that the context keeps a pointer to
 * @minor, so it must be freed before @minor is.
 *
 * RETURNS:
 * Pointer to newly allocated context, ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* set aside 4k for event buffer */

	spin_lock_init(&file->master_lookup_lock);
	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}

static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * This frees and deallocates a DRM file context previously allocated with
 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 * before calling this.
 *
 * If NULL is passed, this is a no-op.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
		  current->comm, task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  atomic_read(&dev->open_count));

#ifdef CONFIG_DRM_LEGACY
	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);
#endif

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}

static void drm_close_helper(struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(file_priv);
}

/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

/*
 * Called whenever a process opens a device file taken from drm_minor::kdev.
 *
 * \param filp file pointer.
 * \param minor acquired minor pointer.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it into the double linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
		  task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef CONFIG_DRM_LEGACY
#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;

		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
						       struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif
#endif

	return 0;
}

/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it. It also calls the &drm_driver.open driver callback.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

err_undo:
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);

void drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}

/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, and calls the
 * &drm_driver.postclose driver callback. If this is the last open file for the
 * DRM device also proceeds to call the &drm_driver.lastclose driver callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	drm_close_helper(filp);

	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);

/**
 * drm_release_noglobal - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function may be used by drivers as their &file_operations.release
 * method. It is identical to drm_release(), but skips taking
 * &drm_global_mutex except for the final lastclose step. It is safe for
 * drivers for which drm_dev_needs_global_mutex() returns false.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	drm_close_helper(filp);

	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
		drm_lastclose(dev);
		mutex_unlock(&drm_global_mutex);
	}

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release_noglobal);

/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their &file_operations.read
 * method if they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion
 * this means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress.
 * Since the maximum event space is currently 4K it's recommended to just use
 * that for safety.
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible_poll(&file_priv->event_wait,
							   EPOLLIN | EPOLLRDNORM);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);

/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their &file_operations.poll method
 * if they use DRM events for asynchronous signalling to userspace. Since
 * events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * See also drm_read().
 *
 * RETURNS:
 *
 * Mask of POLL flags indicating the current status of the file.
 */
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(drm_poll);

/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before queuing
 * up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embedded @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold &drm_device.event_lock.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);

/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before queuing
 * up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embedded @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * Callers which already hold &drm_device.event_lock should use
 * drm_event_reserve_init_locked() instead.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);
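
/*
 * A sketch of the typical driver-side event flow (hypothetical driver code;
 * "struct my_event" and MY_EVENT_TYPE are illustrative, not part of the DRM
 * API). The tracking structure must be allocated with kmalloc() and must embed
 * &struct drm_pending_event as its first member:
 *
 *     struct my_event {
 *             struct drm_pending_event base;
 *             struct drm_event event;
 *     };
 *
 *     struct my_event *e = kzalloc(sizeof(*e), GFP_KERNEL);
 *
 *     if (!e)
 *             return -ENOMEM;
 *     e->event.type = MY_EVENT_TYPE;
 *     e->event.length = sizeof(e->event);
 *     ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
 *     if (ret) {
 *             kfree(e);
 *             return ret;
 *     }
 *
 * If the ioctl fails before the event is queued it must be released with
 * drm_event_cancel_free(dev, &e->base); once the asynchronous work completes
 * it is delivered with drm_send_event(dev, &e->base).
 */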

/**
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space. It is used to cancel an event when the
 * nonblocking operation could not be submitted and needed to be aborted.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (p->fence)
		dma_fence_put(p->fence);

	kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);

/*
 * Deliver the pending event @e to its file, signalling any attached completion
 * and fence first. Caller must hold &drm_device.event_lock.
 */
static void drm_send_event_helper(struct drm_device *dev,
				  struct drm_pending_event *e, ktime_t timestamp)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		if (timestamp)
			dma_fence_signal_timestamp(e->fence, timestamp);
		else
			dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible_poll(&e->file_priv->event_wait,
				   EPOLLIN | EPOLLRDNORM);
}

/**
 * drm_send_event_timestamp_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
 * time domain
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_timestamp_locked(struct drm_device *dev,
				     struct drm_pending_event *e, ktime_t timestamp)
{
	drm_send_event_helper(dev, e, timestamp);
}
EXPORT_SYMBOL(drm_send_event_timestamp_locked);

/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock, see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	drm_send_event_helper(dev, e, 0);
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires
 * &drm_device.event_lock, see drm_send_event_locked() for callers which
 * already hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_helper(dev, e, 0);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);

/**
 * mock_drm_getfile - Create a new struct file for the drm device
 * @minor: drm minor to wrap (e.g. #drm_device.primary)
 * @flags: file creation mode (O_RDWR etc)
 *
 * This creates a new struct file that wraps a DRM file context around a DRM
 * minor. This mimics userspace opening e.g. /dev/dri/card0, but without
 * invoking userspace. The struct file may be operated on using its f_op
 * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
 * to userspace facing functions as an internal/anonymous client.
 *
 * RETURNS:
 * Pointer to newly created struct file, ERR_PTR on failure.
 */
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	struct file *file;

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
	if (IS_ERR(file)) {
		drm_file_free(priv);
		return file;
	}

	/* Everyone shares a single global address space */
	file->f_mapping = dev->anon_inode->i_mapping;

	drm_dev_get(dev);
	priv->filp = file;

	return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
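
/*
 * Example (sketch): an in-kernel test can create an anonymous DRM client
 * without going through /dev/dri, e.g.:
 *
 *     struct file *file = mock_drm_getfile(dev->primary, O_RDWR);
 *
 *     if (IS_ERR(file))
 *             return PTR_ERR(file);
 *     ...
 *     fput(file);
 *
 * fput() drops the file reference again once the test is done with it.
 */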

#ifdef CONFIG_MMU
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * drm_addr_inflate() attempts to construct an aligned area by inflating
 * the area size and skipping the unaligned start of the area.
 * adapted from shmem_get_unmapped_area()
 */
static unsigned long drm_addr_inflate(unsigned long addr,
				      unsigned long len,
				      unsigned long pgoff,
				      unsigned long flags,
				      unsigned long huge_size)
{
	unsigned long offset, inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
	if (offset && offset + len < 2 * huge_size)
		return addr;
	if ((addr & (huge_size - 1)) == offset)
		return addr;

	inflated_len = len + huge_size - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
						       0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (huge_size - 1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += huge_size;

	if (inflated_addr > TASK_SIZE - len)
		return addr;

	return inflated_addr;
}

/**
 * drm_get_unmapped_area() - Get an unused user-space virtual memory area
 * suitable for huge page table entries.
 * @file: The struct file representing the address space being mmap()'d.
 * @uaddr: Start address suggested by user-space.
 * @len: Length of the area.
 * @pgoff: The page offset into the address space.
 * @flags: mmap flags
 * @mgr: The address space manager used by the drm driver. This argument can
 * probably be removed at some point when all drivers use the same
 * address space manager.
 *
 * This function attempts to find an unused user-space virtual memory area
 * that can accommodate the size we want to map, and that is properly
 * aligned to facilitate huge page table entries matching actual
 * kernel pages that may already exist or be allocated in the future.
 * It is modified from shmem_get_unmapped_area() addressing huge-page
 * compatibility.
 *
 * Return: aligned user-space address.
 */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	unsigned long addr;
	unsigned long inflated_addr;
	struct drm_vma_offset_node *node;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * @pgoff is the file page-offset the huge page boundaries of
	 * which typically aligns to physical address huge page boundaries.
	 * That's not true for DRM, however, where physical address huge
	 * page boundaries instead are aligned with the offset from
	 * buffer object start. So adjust @pgoff to be the offset from
	 * buffer object start.
	 */
	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
	if (node)
		pgoff -= node->vm_node.start;
	drm_vma_offset_unlock_lookup(mgr);

	addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COW'ed.
	 * But if caller specified an address hint, respect that as before.
	 */
	if (uaddr)
		return addr;

	inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
					 HPAGE_PMD_SIZE);

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    len >= HPAGE_PUD_SIZE)
		inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
						 flags, HPAGE_PUD_SIZE);
	return inflated_addr;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
#endif /* CONFIG_MMU */