Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * \author Rickard E. (Rik) Faith <faith@valinux.com>
0003  * \author Daryll Strauss <daryll@valinux.com>
0004  * \author Gareth Hughes <gareth@valinux.com>
0005  */
0006 
0007 /*
0008  * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
0009  *
0010  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
0011  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
0012  * All Rights Reserved.
0013  *
0014  * Permission is hereby granted, free of charge, to any person obtaining a
0015  * copy of this software and associated documentation files (the "Software"),
0016  * to deal in the Software without restriction, including without limitation
0017  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0018  * and/or sell copies of the Software, and to permit persons to whom the
0019  * Software is furnished to do so, subject to the following conditions:
0020  *
0021  * The above copyright notice and this permission notice (including the next
0022  * paragraph) shall be included in all copies or substantial portions of the
0023  * Software.
0024  *
0025  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0026  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0027  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0028  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
0029  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0030  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0031  * OTHER DEALINGS IN THE SOFTWARE.
0032  */
0033 
0034 #include <linux/anon_inodes.h>
0035 #include <linux/dma-fence.h>
0036 #include <linux/file.h>
0037 #include <linux/module.h>
0038 #include <linux/pci.h>
0039 #include <linux/poll.h>
0040 #include <linux/slab.h>
0041 
0042 #include <drm/drm_client.h>
0043 #include <drm/drm_drv.h>
0044 #include <drm/drm_file.h>
0045 #include <drm/drm_print.h>
0046 
0047 #include "drm_crtc_internal.h"
0048 #include "drm_internal.h"
0049 #include "drm_legacy.h"
0050 
0051 #if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
0052 #include <uapi/asm/mman.h>
0053 #include <drm/drm_vma_manager.h>
0054 #endif
0055 
0056 /* from BKL pushdown */
0057 DEFINE_MUTEX(drm_global_mutex);
0058 
0059 bool drm_dev_needs_global_mutex(struct drm_device *dev)
0060 {
0061     /*
0062      * Legacy drivers rely on all kinds of BKL locking semantics, don't
0063      * bother. They also still need BKL locking for their ioctls, so better
0064      * safe than sorry.
0065      */
0066     if (drm_core_check_feature(dev, DRIVER_LEGACY))
0067         return true;
0068 
0069     /*
0070      * The deprecated ->load callback must be called after the driver is
0071      * already registered. This means such drivers rely on the BKL to make
0072      * sure an open can't proceed until the driver is actually fully set up.
0073      * Similar hilarity holds for the unload callback.
0074      */
0075     if (dev->driver->load || dev->driver->unload)
0076         return true;
0077 
0078     /*
0079      * Drivers with the lastclose callback assume that it's synchronized
0080      * against concurrent opens, which again needs the BKL. The proper fix
0081      * is to use the drm_client infrastructure with proper locking for each
0082      * client.
0083      */
0084     if (dev->driver->lastclose)
0085         return true;
0086 
0087     return false;
0088 }
0089 
0090 /**
0091  * DOC: file operations
0092  *
0093  * Drivers must define the file operations structure that forms the DRM
0094  * userspace API entry point, even though most of those operations are
0095  * implemented in the DRM core. The resulting &struct file_operations must be
0096  * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
0098  * Note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n, so there's no
0099  * need to sprinkle #ifdef into the code. Drivers which implement private ioctls
0100  * that require 32/64 bit compatibility support must provide their own
0101  * &file_operations.compat_ioctl handler that processes private ioctls and calls
0102  * drm_compat_ioctl() for core ioctls.
0103  *
0104  * In addition drm_read() and drm_poll() provide support for DRM events. DRM
0105  * events are a generic and extensible means to send asynchronous events to
0106  * userspace through the file descriptor. They are used to send vblank event and
0107  * page flip completions by the KMS API. But drivers can also use it for their
0108  * own needs, e.g. to signal completion of rendering.
0109  *
0110  * For the driver-side event interface see drm_event_reserve_init() and
0111  * drm_send_event() as the main starting points.
0112  *
0113  * The memory mapping implementation will vary depending on how the driver
0114  * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
0115  * function, modern drivers should use one of the provided memory-manager
0116  * specific implementations. For GEM-based drivers this is drm_gem_mmap().
0117  *
0118  * No other file operations are supported by the DRM userspace API. Overall the
0119  * following is an example &file_operations structure::
0120  *
0121  *     static const example_drm_fops = {
0122  *             .owner = THIS_MODULE,
0123  *             .open = drm_open,
0124  *             .release = drm_release,
0125  *             .unlocked_ioctl = drm_ioctl,
0126  *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
0127  *             .poll = drm_poll,
0128  *             .read = drm_read,
0129  *             .llseek = no_llseek,
0130  *             .mmap = drm_gem_mmap,
0131  *     };
0132  *
0133  * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
0134  * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
0135  * simpler.
0136  *
0137  * The driver's &file_operations must be stored in &drm_driver.fops.
0138  *
0139  * For driver-private IOCTL handling see the more detailed discussion in
0140  * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
0141  */
0142 
/**
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context. It is not linked into any context and
 * can be used by the caller freely. Note that the context keeps a pointer to
 * @minor, so it must be freed before @minor is.
 *
 * RETURNS:
 * Pointer to newly allocated context, ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
    struct drm_device *dev = minor->dev;
    struct drm_file *file;
    int ret;

    file = kzalloc(sizeof(*file), GFP_KERNEL);
    if (!file)
        return ERR_PTR(-ENOMEM);

    /* Take a reference on the opening task's pid for debugging/accounting. */
    file->pid = get_pid(task_pid(current));
    file->minor = minor;

    /* for compatibility root is always authenticated */
    file->authenticated = capable(CAP_SYS_ADMIN);

    INIT_LIST_HEAD(&file->lhead);
    INIT_LIST_HEAD(&file->fbs);
    mutex_init(&file->fbs_lock);
    INIT_LIST_HEAD(&file->blobs);
    INIT_LIST_HEAD(&file->pending_event_list);
    INIT_LIST_HEAD(&file->event_list);
    init_waitqueue_head(&file->event_wait);
    file->event_space = 4096; /* set aside 4k for event buffer */

    spin_lock_init(&file->master_lookup_lock);
    mutex_init(&file->event_read_lock);

    if (drm_core_check_feature(dev, DRIVER_GEM))
        drm_gem_open(dev, file);

    if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
        drm_syncobj_open(file);

    drm_prime_init_file_private(&file->prime);

    /* Driver hook runs last so everything above can be unwound on failure. */
    if (dev->driver->open) {
        ret = dev->driver->open(dev, file);
        if (ret < 0)
            goto out_prime_destroy;
    }

    return file;

    /* Error unwind: release in reverse order of initialization above. */
out_prime_destroy:
    drm_prime_destroy_file_private(&file->prime);
    if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
        drm_syncobj_release(file);
    if (drm_core_check_feature(dev, DRIVER_GEM))
        drm_gem_release(dev, file);
    put_pid(file->pid);
    kfree(file);

    return ERR_PTR(ret);
}
0209 
/*
 * Drop all events still associated with @file_priv.
 *
 * Pending (not yet completed) events are only unlinked and disarmed by
 * clearing e->file_priv; they stay owned by whoever will complete them —
 * drm_send_event_helper() frees events whose file_priv is NULL. Events
 * already queued for reading are freed right here.
 */
static void drm_events_release(struct drm_file *file_priv)
{
    struct drm_device *dev = file_priv->minor->dev;
    struct drm_pending_event *e, *et;
    unsigned long flags;

    /* Both lists are protected by the device-wide event_lock. */
    spin_lock_irqsave(&dev->event_lock, flags);

    /* Unlink pending events */
    list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
                 pending_link) {
        list_del(&e->pending_link);
        e->file_priv = NULL;
    }

    /* Remove unconsumed events */
    list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
        list_del(&e->link);
        kfree(e);
    }

    spin_unlock_irqrestore(&dev->event_lock, flags);
}
0233 
/**
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * This destroys and deallocates a DRM file context previously allocated via
 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 * before calling this.
 *
 * If NULL is passed, this is a no-op.
 */
void drm_file_free(struct drm_file *file)
{
    struct drm_device *dev;

    if (!file)
        return;

    dev = file->minor->dev;

    DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
          current->comm, task_pid_nr(current),
          (long)old_encode_dev(file->minor->kdev->devt),
          atomic_read(&dev->open_count));

    /* The legacy ->preclose hook runs first, before any teardown. */
#ifdef CONFIG_DRM_LEGACY
    if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
        dev->driver->preclose)
        dev->driver->preclose(dev, file);
#endif

    if (drm_core_check_feature(dev, DRIVER_LEGACY))
        drm_legacy_lock_release(dev, file->filp);

    if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
        drm_legacy_reclaim_buffers(dev, file);

    /* Disarm pending events and free any the file never read. */
    drm_events_release(file);

    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
        drm_fb_release(file);
        drm_property_destroy_user_blobs(dev, file);
    }

    if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
        drm_syncobj_release(file);

    if (drm_core_check_feature(dev, DRIVER_GEM))
        drm_gem_release(dev, file);

    drm_legacy_ctxbitmap_flush(dev, file);

    if (drm_is_primary_client(file))
        drm_master_release(file);

    /* Driver callback runs after the core state for this file is gone. */
    if (dev->driver->postclose)
        dev->driver->postclose(dev, file);

    drm_prime_destroy_file_private(&file->prime);

    /* drm_events_release() above must have emptied the event list. */
    WARN_ON(!list_empty(&file->event_list));

    put_pid(file->pid);
    kfree(file);
}
0298 
/*
 * Unlink @filp's drm_file from the device's open-file list (under
 * filelist_mutex) and free it. Common tail for both release paths.
 */
static void drm_close_helper(struct file *filp)
{
    struct drm_file *file_priv = filp->private_data;
    struct drm_device *dev = file_priv->minor->dev;

    mutex_lock(&dev->filelist_mutex);
    list_del(&file_priv->lhead);
    mutex_unlock(&dev->filelist_mutex);

    drm_file_free(file_priv);
}
0310 
/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
    return 0;       /* No cmpxchg before v9 sparc. */
#else
    return 1;
#endif
}
0323 
/*
 * Called whenever a process opens a drm node
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and add it into the double linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
    struct drm_device *dev = minor->dev;
    struct drm_file *priv;
    int ret;

    if (filp->f_flags & O_EXCL)
        return -EBUSY;  /* No exclusive opens */
    if (!drm_cpu_valid())
        return -EINVAL;
    /* Refuse opens unless the device is powered on or dynamically off. */
    if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
        dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
        return -EINVAL;

    DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
          task_pid_nr(current), minor->index);

    priv = drm_file_alloc(minor);
    if (IS_ERR(priv))
        return PTR_ERR(priv);

    /* Primary-node clients additionally set up master state. */
    if (drm_is_primary_client(priv)) {
        ret = drm_master_open(priv);
        if (ret) {
            drm_file_free(priv);
            return ret;
        }
    }

    filp->private_data = priv;
    filp->f_mode |= FMODE_UNSIGNED_OFFSET;
    priv->filp = filp;

    mutex_lock(&dev->filelist_mutex);
    list_add(&priv->lhead, &dev->filelist);
    mutex_unlock(&dev->filelist_mutex);

#ifdef CONFIG_DRM_LEGACY
#ifdef __alpha__
    /*
     * Default the hose
     */
    if (!dev->hose) {
        struct pci_dev *pci_dev;

        /* Fall back to the first VGA device, then the first root bus. */
        pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
        if (pci_dev) {
            dev->hose = pci_dev->sysdata;
            pci_dev_put(pci_dev);
        }
        if (!dev->hose) {
            struct pci_bus *b = list_entry(pci_root_buses.next,
                struct pci_bus, node);
            if (b)
                dev->hose = b->sysdata;
        }
    }
#endif
#endif

    return 0;
}
0396 
/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it. It also calls the &drm_driver.open driver callback.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
    struct drm_device *dev;
    struct drm_minor *minor;
    int retcode;
    int need_setup = 0;

    minor = drm_minor_acquire(iminor(inode));
    if (IS_ERR(minor))
        return PTR_ERR(minor);

    dev = minor->dev;
    if (drm_dev_needs_global_mutex(dev))
        mutex_lock(&drm_global_mutex);

    /* First opener of the device triggers legacy setup below. */
    if (!atomic_fetch_inc(&dev->open_count))
        need_setup = 1;

    /* share address_space across all char-devs of a single device */
    filp->f_mapping = dev->anon_inode->i_mapping;

    retcode = drm_open_helper(filp, minor);
    if (retcode)
        goto err_undo;
    if (need_setup) {
        retcode = drm_legacy_setup(dev);
        if (retcode) {
            drm_close_helper(filp);
            goto err_undo;
        }
    }

    if (drm_dev_needs_global_mutex(dev))
        mutex_unlock(&drm_global_mutex);

    return 0;

err_undo:
    /* Roll back the open_count increment and the minor reference. */
    atomic_dec(&dev->open_count);
    if (drm_dev_needs_global_mutex(dev))
        mutex_unlock(&drm_global_mutex);
    drm_minor_release(minor);
    return retcode;
}
EXPORT_SYMBOL(drm_open);
0455 
/*
 * Called when the last open file on @dev goes away: invokes the driver's
 * lastclose callback, re-initializes legacy device state, then lets
 * in-kernel clients restore their state via drm_client_dev_restore().
 */
void drm_lastclose(struct drm_device * dev)
{
    DRM_DEBUG("\n");

    if (dev->driver->lastclose)
        dev->driver->lastclose(dev);
    DRM_DEBUG("driver lastclose completed\n");

    if (drm_core_check_feature(dev, DRIVER_LEGACY))
        drm_legacy_dev_reinit(dev);

    drm_client_dev_restore(dev);
}
0469 
/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, and calls the
 * &drm_driver.postclose driver callback. If this is the last open file for the
 * DRM device also proceeds to call the &drm_driver.lastclose driver callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
    struct drm_file *file_priv = filp->private_data;
    struct drm_minor *minor = file_priv->minor;
    struct drm_device *dev = minor->dev;

    if (drm_dev_needs_global_mutex(dev))
        mutex_lock(&drm_global_mutex);

    DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

    drm_close_helper(filp);

    /* Last file gone: run the device-wide lastclose work. */
    if (atomic_dec_and_test(&dev->open_count))
        drm_lastclose(dev);

    if (drm_dev_needs_global_mutex(dev))
        mutex_unlock(&drm_global_mutex);

    /* Drop the reference taken by drm_minor_acquire() in drm_open(). */
    drm_minor_release(minor);

    return 0;
}
EXPORT_SYMBOL(drm_release);
0508 
/**
 * drm_release_noglobal - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function may be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file prior to taking
 * the drm_global_mutex, which then calls the &drm_driver.postclose driver
 * callback. If this is the last open file for the DRM device also proceeds to
 * call the &drm_driver.lastclose driver callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
    struct drm_file *file_priv = filp->private_data;
    struct drm_minor *minor = file_priv->minor;
    struct drm_device *dev = minor->dev;

    drm_close_helper(filp);

    /* Only take the global mutex when the count actually drops to zero,
     * i.e. when lastclose work must run. */
    if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
        drm_lastclose(dev);
        mutex_unlock(&drm_global_mutex);
    }

    /* Drop the reference taken by drm_minor_acquire() in drm_open(). */
    drm_minor_release(minor);

    return 0;
}
EXPORT_SYMBOL(drm_release_noglobal);
0542 
/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their &file_operations.read
 * method if they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K it's recommended to just use that for
 * safety.
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
         size_t count, loff_t *offset)
{
    struct drm_file *file_priv = filp->private_data;
    struct drm_device *dev = file_priv->minor->dev;
    ssize_t ret;

    /* Serializes concurrent readers so events aren't interleaved. */
    ret = mutex_lock_interruptible(&file_priv->event_read_lock);
    if (ret)
        return ret;

    /* From here on, ret doubles as the number of bytes already copied. */
    for (;;) {
        struct drm_pending_event *e = NULL;

        /* Dequeue the oldest event and give its space back up front;
         * the put_back path below re-reserves it if needed. */
        spin_lock_irq(&dev->event_lock);
        if (!list_empty(&file_priv->event_list)) {
            e = list_first_entry(&file_priv->event_list,
                    struct drm_pending_event, link);
            file_priv->event_space += e->event->length;
            list_del(&e->link);
        }
        spin_unlock_irq(&dev->event_lock);

        if (e == NULL) {
            /* Queue drained: return whatever was copied so far. */
            if (ret)
                break;

            if (filp->f_flags & O_NONBLOCK) {
                ret = -EAGAIN;
                break;
            }

            /* Drop the read lock while sleeping so other readers
             * and drm_poll() can make progress. */
            mutex_unlock(&file_priv->event_read_lock);
            ret = wait_event_interruptible(file_priv->event_wait,
                               !list_empty(&file_priv->event_list));
            if (ret >= 0)
                ret = mutex_lock_interruptible(&file_priv->event_read_lock);
            if (ret)
                return ret;
        } else {
            unsigned length = e->event->length;

            /* Event doesn't fit in the remaining buffer: requeue it
             * at the head and stop (only full events are copied). */
            if (length > count - ret) {
put_back_event:
                spin_lock_irq(&dev->event_lock);
                file_priv->event_space -= length;
                list_add(&e->link, &file_priv->event_list);
                spin_unlock_irq(&dev->event_lock);
                /* The event is readable again, re-notify pollers. */
                wake_up_interruptible_poll(&file_priv->event_wait,
                    EPOLLIN | EPOLLRDNORM);
                break;
            }

            /* Only report -EFAULT if nothing was copied yet;
             * otherwise return the partial byte count. */
            if (copy_to_user(buffer + ret, e->event, length)) {
                if (ret == 0)
                    ret = -EFAULT;
                goto put_back_event;
            }

            ret += length;
            kfree(e);
        }
    }
    mutex_unlock(&file_priv->event_read_lock);

    return ret;
}
EXPORT_SYMBOL(drm_read);
0636 
0637 /**
0638  * drm_poll - poll method for DRM file
0639  * @filp: file pointer
0640  * @wait: poll waiter table
0641  *
0642  * This function must be used by drivers as their &file_operations.read method
0643  * if they use DRM events for asynchronous signalling to userspace.  Since
0644  * events are used by the KMS API for vblank and page flip completion this means
0645  * all modern display drivers must use it.
0646  *
0647  * See also drm_read().
0648  *
0649  * RETURNS:
0650  *
0651  * Mask of POLL flags indicating the current status of the file.
0652  */
0653 __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
0654 {
0655     struct drm_file *file_priv = filp->private_data;
0656     __poll_t mask = 0;
0657 
0658     poll_wait(filp, &file_priv->event_wait, wait);
0659 
0660     if (!list_empty(&file_priv->event_list))
0661         mask |= EPOLLIN | EPOLLRDNORM;
0662 
0663     return mask;
0664 }
0665 EXPORT_SYMBOL(drm_poll);
0666 
0667 /**
0668  * drm_event_reserve_init_locked - init a DRM event and reserve space for it
0669  * @dev: DRM device
0670  * @file_priv: DRM file private data
0671  * @p: tracking structure for the pending event
0672  * @e: actual event data to deliver to userspace
0673  *
0674  * This function prepares the passed in event for eventual delivery. If the event
0675  * doesn't get delivered (because the IOCTL fails later on, before queuing up
0676  * anything) then the even must be cancelled and freed using
0677  * drm_event_cancel_free(). Successfully initialized events should be sent out
0678  * using drm_send_event() or drm_send_event_locked() to signal completion of the
0679  * asynchronous event to userspace.
0680  *
0681  * If callers embedded @p into a larger structure it must be allocated with
0682  * kmalloc and @p must be the first member element.
0683  *
0684  * This is the locked version of drm_event_reserve_init() for callers which
0685  * already hold &drm_device.event_lock.
0686  *
0687  * RETURNS:
0688  *
0689  * 0 on success or a negative error code on failure.
0690  */
0691 int drm_event_reserve_init_locked(struct drm_device *dev,
0692                   struct drm_file *file_priv,
0693                   struct drm_pending_event *p,
0694                   struct drm_event *e)
0695 {
0696     if (file_priv->event_space < e->length)
0697         return -ENOMEM;
0698 
0699     file_priv->event_space -= e->length;
0700 
0701     p->event = e;
0702     list_add(&p->pending_link, &file_priv->pending_event_list);
0703     p->file_priv = file_priv;
0704 
0705     return 0;
0706 }
0707 EXPORT_SYMBOL(drm_event_reserve_init_locked);
0708 
0709 /**
0710  * drm_event_reserve_init - init a DRM event and reserve space for it
0711  * @dev: DRM device
0712  * @file_priv: DRM file private data
0713  * @p: tracking structure for the pending event
0714  * @e: actual event data to deliver to userspace
0715  *
0716  * This function prepares the passed in event for eventual delivery. If the event
0717  * doesn't get delivered (because the IOCTL fails later on, before queuing up
0718  * anything) then the even must be cancelled and freed using
0719  * drm_event_cancel_free(). Successfully initialized events should be sent out
0720  * using drm_send_event() or drm_send_event_locked() to signal completion of the
0721  * asynchronous event to userspace.
0722  *
0723  * If callers embedded @p into a larger structure it must be allocated with
0724  * kmalloc and @p must be the first member element.
0725  *
0726  * Callers which already hold &drm_device.event_lock should use
0727  * drm_event_reserve_init_locked() instead.
0728  *
0729  * RETURNS:
0730  *
0731  * 0 on success or a negative error code on failure.
0732  */
0733 int drm_event_reserve_init(struct drm_device *dev,
0734                struct drm_file *file_priv,
0735                struct drm_pending_event *p,
0736                struct drm_event *e)
0737 {
0738     unsigned long flags;
0739     int ret;
0740 
0741     spin_lock_irqsave(&dev->event_lock, flags);
0742     ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
0743     spin_unlock_irqrestore(&dev->event_lock, flags);
0744 
0745     return ret;
0746 }
0747 EXPORT_SYMBOL(drm_event_reserve_init);
0748 
0749 /**
0750  * drm_event_cancel_free - free a DRM event and release its space
0751  * @dev: DRM device
0752  * @p: tracking structure for the pending event
0753  *
0754  * This function frees the event @p initialized with drm_event_reserve_init()
0755  * and releases any allocated space. It is used to cancel an event when the
0756  * nonblocking operation could not be submitted and needed to be aborted.
0757  */
0758 void drm_event_cancel_free(struct drm_device *dev,
0759                struct drm_pending_event *p)
0760 {
0761     unsigned long flags;
0762 
0763     spin_lock_irqsave(&dev->event_lock, flags);
0764     if (p->file_priv) {
0765         p->file_priv->event_space += p->event->length;
0766         list_del(&p->pending_link);
0767     }
0768     spin_unlock_irqrestore(&dev->event_lock, flags);
0769 
0770     if (p->fence)
0771         dma_fence_put(p->fence);
0772 
0773     kfree(p);
0774 }
0775 EXPORT_SYMBOL(drm_event_cancel_free);
0776 
/*
 * Common delivery path for drm_send_event*(): signals any attached
 * completion and fence, then either queues the event for userspace or,
 * if the file was already closed (file_priv cleared by
 * drm_events_release()), frees it. Caller must hold dev->event_lock.
 * A @timestamp of 0 means "signal the fence with the current time".
 */
static void drm_send_event_helper(struct drm_device *dev,
               struct drm_pending_event *e, ktime_t timestamp)
{
    assert_spin_locked(&dev->event_lock);

    /* Wake in-kernel waiters first. */
    if (e->completion) {
        complete_all(e->completion);
        e->completion_release(e->completion);
        e->completion = NULL;
    }

    if (e->fence) {
        if (timestamp)
            dma_fence_signal_timestamp(e->fence, timestamp);
        else
            dma_fence_signal(e->fence);
        dma_fence_put(e->fence);
    }

    /* Disarmed event: no reader left, just free it. */
    if (!e->file_priv) {
        kfree(e);
        return;
    }

    /* Move from pending to the readable list and notify poll()/read(). */
    list_del(&e->pending_link);
    list_add_tail(&e->link,
              &e->file_priv->event_list);
    wake_up_interruptible_poll(&e->file_priv->event_wait,
        EPOLLIN | EPOLLRDNORM);
}
0807 
/**
 * drm_send_event_timestamp_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
 * time domain
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_timestamp_locked(struct drm_device *dev,
                     struct drm_pending_event *e, ktime_t timestamp)
{
    drm_send_event_helper(dev, e, timestamp);
}
EXPORT_SYMBOL(drm_send_event_timestamp_locked);
0830 
/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock, see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
    drm_send_event_helper(dev, e, 0); /* 0 = no explicit fence timestamp */
}
EXPORT_SYMBOL(drm_send_event_locked);
0850 
0851 /**
0852  * drm_send_event - send DRM event to file descriptor
0853  * @dev: DRM device
0854  * @e: DRM event to deliver
0855  *
0856  * This function sends the event @e, initialized with drm_event_reserve_init(),
0857  * to its associated userspace DRM file. This function acquires
0858  * &drm_device.event_lock, see drm_send_event_locked() for callers which already
0859  * hold this lock.
0860  *
0861  * Note that the core will take care of unlinking and disarming events when the
0862  * corresponding DRM file is closed. Drivers need not worry about whether the
0863  * DRM file for this event still exists and can call this function upon
0864  * completion of the asynchronous work unconditionally.
0865  */
0866 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
0867 {
0868     unsigned long irqflags;
0869 
0870     spin_lock_irqsave(&dev->event_lock, irqflags);
0871     drm_send_event_helper(dev, e, 0);
0872     spin_unlock_irqrestore(&dev->event_lock, irqflags);
0873 }
0874 EXPORT_SYMBOL(drm_send_event);
0875 
/**
 * mock_drm_getfile - Create a new struct file for the drm device
 * @minor: drm minor to wrap (e.g. #drm_device.primary)
 * @flags: file creation mode (O_RDWR etc)
 *
 * This create a new struct file that wraps a DRM file context around a
 * DRM minor. This mimicks userspace opening e.g. /dev/dri/card0, but without
 * invoking userspace. The struct file may be operated on using its f_op
 * (the drm_device.driver.fops) to mimick userspace operations, or be supplied
 * to userspace facing functions as an internal/anonymous client.
 *
 * RETURNS:
 * Pointer to newly created struct file, ERR_PTR on failure.
 */
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
    struct drm_device *dev = minor->dev;
    struct drm_file *priv;
    struct file *file;

    priv = drm_file_alloc(minor);
    if (IS_ERR(priv))
        return ERR_CAST(priv);

    /* Wrap the DRM file context in an anonymous inode's struct file. */
    file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
    if (IS_ERR(file)) {
        drm_file_free(priv);
        return file;
    }

    /* Everyone shares a single global address space */
    file->f_mapping = dev->anon_inode->i_mapping;

    /* Pin the device for the lifetime of this mock file. */
    drm_dev_get(dev);
    priv->filp = file;

    return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
0915 
0916 #ifdef CONFIG_MMU
0917 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0918 /*
0919  * drm_addr_inflate() attempts to construct an aligned area by inflating
0920  * the area size and skipping the unaligned start of the area.
0921  * adapted from shmem_get_unmapped_area()
0922  *
0923  * Returns the aligned address on success, or the original @addr when
0924  * inflation is unnecessary, impossible, or would exceed the task's
0925  * address space.
0926  */
0923 static unsigned long drm_addr_inflate(unsigned long addr,
0924                       unsigned long len,
0925                       unsigned long pgoff,
0926                       unsigned long flags,
0927                       unsigned long huge_size)
0928 {
0929     unsigned long offset, inflated_len;
0930     unsigned long inflated_addr;
0931     unsigned long inflated_offset;
0932 
0933     /* Offset of the mapping's start within a huge_size-sized page. */
0933     offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
0934     /*
0935      * For an unaligned file offset, aligning only pays off if at least
0936      * one full huge page fits inside the resulting mapping.
0937      */
0934     if (offset && offset + len < 2 * huge_size)
0935         return addr;
0936     /* Already congruent to the file offset modulo huge_size. */
0936     if ((addr & (huge_size - 1)) == offset)
0937         return addr;
0938 
0939     /* Over-allocate so we can slide forward to an aligned start. */
0939     inflated_len = len + huge_size - PAGE_SIZE;
0940     if (inflated_len > TASK_SIZE)
0941         return addr;
0942     /* Guard against unsigned wrap-around of the addition above. */
0942     if (inflated_len < len)
0943         return addr;
0944 
0945     inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
0946                                0, flags);
0947     if (IS_ERR_VALUE(inflated_addr))
0948         return addr;
0949     if (inflated_addr & ~PAGE_MASK)
0950         return addr;
0951 
0952     /* Advance to the first address congruent to @offset mod huge_size. */
0952     inflated_offset = inflated_addr & (huge_size - 1);
0953     inflated_addr += offset - inflated_offset;
0954     if (inflated_offset > offset)
0955         inflated_addr += huge_size;
0956 
0957     /* The aligned start plus the original length must still fit. */
0957     if (inflated_addr > TASK_SIZE - len)
0958         return addr;
0959 
0960     return inflated_addr;
0961 }
0962 
0963 /**
0964  * drm_get_unmapped_area() - Get an unused user-space virtual memory area
0965  * suitable for huge page table entries.
0966  * @file: The struct file representing the address space being mmap()'d.
0967  * @uaddr: Start address suggested by user-space.
0968  * @len: Length of the area.
0969  * @pgoff: The page offset into the address space.
0970  * @flags: mmap flags
0971  * @mgr: The address space manager used by the drm driver. This argument can
0972  * probably be removed at some point when all drivers use the same
0973  * address space manager.
0974  *
0975  * This function attempts to find an unused user-space virtual memory area
0976  * that can accommodate the size we want to map, and that is properly
0977  * aligned to facilitate huge page table entries matching actual
0978  * huge pages or huge page aligned memory in buffer objects. Buffer objects
0979  * are assumed to start at huge page boundary pfns (io memory) or be
0980  * populated by huge pages aligned to the start of the buffer object
0981  * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
0982  *
0983  * Return: aligned user-space address.
0984  */
0985 unsigned long drm_get_unmapped_area(struct file *file,
0986                     unsigned long uaddr, unsigned long len,
0987                     unsigned long pgoff, unsigned long flags,
0988                     struct drm_vma_offset_manager *mgr)
0989 {
0990     unsigned long addr;
0991     unsigned long inflated_addr;
0992     struct drm_vma_offset_node *node;
0993 
0994     if (len > TASK_SIZE)
0995         return -ENOMEM;
0996 
0997     /*
0998      * @pgoff is the file page-offset the huge page boundaries of
0999      * which typically aligns to physical address huge page boundaries.
1000      * That's not true for DRM, however, where physical address huge
1001      * page boundaries instead are aligned with the offset from
1002      * buffer object start. So adjust @pgoff to be the offset from
1003      * buffer object start.
1004      */
1005     drm_vma_offset_lock_lookup(mgr);
1006     node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
1007     if (node)
1008         pgoff -= node->vm_node.start;
1009     drm_vma_offset_unlock_lookup(mgr);
1010 
1011     addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
1012     if (IS_ERR_VALUE(addr))
1013         return addr;
1014     /* Bail out on a non-page-aligned or out-of-range result. */
1014     if (addr & ~PAGE_MASK)
1015         return addr;
1016     if (addr > TASK_SIZE - len)
1017         return addr;
1018 
1019     /* Too small for even one PMD-sized huge page: nothing to align. */
1019     if (len < HPAGE_PMD_SIZE)
1020         return addr;
1021     /* MAP_FIXED leaves no freedom to move the mapping. */
1021     if (flags & MAP_FIXED)
1022         return addr;
1023     /*
1024      * Our priority is to support MAP_SHARED mapped hugely;
1025      * and support MAP_PRIVATE mapped hugely too, until it is COWed.
1026      * But if caller specified an address hint, respect that as before.
1027      */
1028     if (uaddr)
1029         return addr;
1030 
1031     /* First try PMD alignment ... */
1031     inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
1032                      HPAGE_PMD_SIZE);
1033 
1034     /* ... then, if the arch supports it and @len warrants it, PUD. */
1034     if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
1035         len >= HPAGE_PUD_SIZE)
1036         inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
1037                          flags, HPAGE_PUD_SIZE);
1038     return inflated_addr;
1039 }
1040 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
1041 /*
1042  * Without CONFIG_TRANSPARENT_HUGEPAGE there are no huge pages to align
1043  * for, so defer straight to the mm's regular get_unmapped_area();
1044  * @mgr is intentionally unused in this configuration.
1045  */
1041 unsigned long drm_get_unmapped_area(struct file *file,
1042                     unsigned long uaddr, unsigned long len,
1043                     unsigned long pgoff, unsigned long flags,
1044                     struct drm_vma_offset_manager *mgr)
1045 {
1046     return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
1047 }
1048 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1049 EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
1050 #endif /* CONFIG_MMU */