// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller, which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately, and not
 * to confuse the mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager
 * should only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, the mm-core will be unable to tear down memory mappings as the
 * VM will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want a valid byte-based user-space address for a given offset, see
 * drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
 */
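
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * user-space mmap()s a buffer by passing the byte-based fake offset of its
 * vma node. drm_vma_node_offset_addr() performs the page-to-byte conversion;
 * the equivalent open-coded form is shown for clarity.
 *
 *     __u64 off;
 *
 *     off = drm_vma_node_offset_addr(node);
 *     // same as: ((__u64)drm_vma_node_start(node)) << PAGE_SHIFT
 *     // user-space then calls: mmap(NULL, size, prot, flags, fd, off)
 */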

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
                 unsigned long page_offset, unsigned long size)
{
    rwlock_init(&mgr->vm_lock);
    drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
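
/*
 * Example (editor's sketch, assuming a hypothetical driver struct my_dev
 * that embeds the manager): set up the manager once at device init. The
 * range constants below are placeholders; drivers pick a range that fits
 * their address_space layout.
 *
 *     struct my_dev {
 *             struct drm_vma_offset_manager vma_mgr;
 *     };
 *
 *     static void my_dev_init(struct my_dev *dev)
 *     {
 *             // 1 GiB of mappable offset space starting at 4 GiB, in pages
 *             drm_vma_offset_manager_init(&dev->vma_mgr,
 *                                         0x100000000ULL >> PAGE_SHIFT,
 *                                         0x40000000ULL >> PAGE_SHIFT);
 *     }
 */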

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
    drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given range. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that the vma offset manager lookup lock must be acquired with
 * drm_vma_offset_lock_lookup() before the lookup. This can then be used to
 * implement weakly referenced lookups using kref_get_unless_zero().
 *
 * Example:
 *
 * ::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(container_of(node, sth, entr));
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
                             unsigned long start,
                             unsigned long pages)
{
    struct drm_mm_node *node, *best;
    struct rb_node *iter;
    unsigned long offset;

    iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
    best = NULL;

    while (likely(iter)) {
        node = rb_entry(iter, struct drm_mm_node, rb);
        offset = node->start;
        if (start >= offset) {
            iter = iter->rb_right;
            best = node;
            if (start == offset)
                break;
        } else {
            iter = iter->rb_left;
        }
    }

    /* verify that the node spans the requested area */
    if (best) {
        offset = best->start + best->size;
        if (offset < start + pages)
            best = NULL;
    }

    if (!best)
        return NULL;

    return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
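
/*
 * Example (editor's sketch): a weakly referenced lookup as described above,
 * assuming a hypothetical struct my_obj that embeds a drm_vma_offset_node
 * as @vma_node and a kref as @ref. vma->vm_pgoff and vma_pages() give the
 * page-based start and size of the faulting mapping.
 *
 *     static struct my_obj *my_obj_lookup(struct drm_vma_offset_manager *mgr,
 *                                         struct vm_area_struct *vma)
 *     {
 *             struct drm_vma_offset_node *node;
 *             struct my_obj *obj = NULL;
 *
 *             drm_vma_offset_lock_lookup(mgr);
 *             node = drm_vma_offset_lookup_locked(mgr, vma->vm_pgoff,
 *                                                 vma_pages(vma));
 *             if (node) {
 *                     obj = container_of(node, struct my_obj, vma_node);
 *                     if (!kref_get_unless_zero(&obj->ref))
 *                             obj = NULL;
 *             }
 *             drm_vma_offset_unlock_lookup(mgr);
 *
 *             return obj;
 *     }
 */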

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or call
 * drm_vma_offset_remove() anyway. However, no cleanup is required in that
 * case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
               struct drm_vma_offset_node *node, unsigned long pages)
{
    int ret = 0;

    write_lock(&mgr->vm_lock);

    if (!drm_mm_node_allocated(&node->vm_node))
        ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
                     &node->vm_node, pages);

    write_unlock(&mgr->vm_lock);

    return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
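
/*
 * Example (editor's sketch, reusing the hypothetical struct my_obj from
 * above): allocate a fake offset when the object is first exposed to
 * user-space, then hand the byte-based offset back for mmap(). Assumes
 * @size is page-aligned, as the DOC section requires.
 *
 *     static int my_obj_map(struct drm_vma_offset_manager *mgr,
 *                           struct my_obj *obj, size_t size, __u64 *offset)
 *     {
 *             int ret;
 *
 *             ret = drm_vma_offset_add(mgr, &obj->vma_node,
 *                                      size >> PAGE_SHIFT);
 *             if (ret)
 *                     return ret;
 *
 *             *offset = drm_vma_node_offset_addr(&obj->vma_node);
 *             return 0;
 *     }
 */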

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
               struct drm_vma_offset_node *node)
{
    write_lock(&mgr->vm_lock);

    if (drm_mm_node_allocated(&node->vm_node)) {
        drm_mm_remove_node(&node->vm_node);
        memset(&node->vm_node, 0, sizeof(node->vm_node));
    }

    write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
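
/*
 * Example (editor's sketch): drop the fake offset when the object is
 * destroyed. drm_vma_offset_remove() tolerates nodes that were never added,
 * so an unconditional call in the release path is safe.
 *
 *     static void my_obj_release(struct drm_vma_offset_manager *mgr,
 *                                struct my_obj *obj)
 *     {
 *             drm_vma_offset_remove(mgr, &obj->vma_node);
 *             kfree(obj);
 *     }
 */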

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call this function if the node
 * is currently not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
    struct rb_node **iter;
    struct rb_node *parent = NULL;
    struct drm_vma_offset_file *new, *entry;
    int ret = 0;

    /* Preallocate entry to avoid atomic allocations below. It is quite
     * unlikely that an open-file is added twice to a single node so we
     * don't optimize for this case. OOM is checked below only if the entry
     * is actually used. */
    new = kmalloc(sizeof(*entry), GFP_KERNEL);

    write_lock(&node->vm_lock);

    iter = &node->vm_files.rb_node;

    while (likely(*iter)) {
        parent = *iter;
        entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

        if (tag == entry->vm_tag) {
            entry->vm_count++;
            goto unlock;
        } else if (tag > entry->vm_tag) {
            iter = &(*iter)->rb_right;
        } else {
            iter = &(*iter)->rb_left;
        }
    }

    if (!new) {
        ret = -ENOMEM;
        goto unlock;
    }

    new->vm_tag = tag;
    new->vm_count = 1;
    rb_link_node(&new->vm_rb, parent, iter);
    rb_insert_color(&new->vm_rb, &node->vm_files);
    new = NULL;

unlock:
    write_unlock(&node->vm_lock);
    kfree(new);
    return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);
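
/*
 * Example (editor's sketch): grant a client access when it acquires a handle
 * to the object, e.g. from a hypothetical open hook. @file is the drm_file
 * of the caller.
 *
 *     static int my_obj_open(struct my_obj *obj, struct drm_file *file)
 *     {
 *             return drm_vma_node_allow(&obj->vma_node, file);
 *     }
 */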

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
             struct drm_file *tag)
{
    struct drm_vma_offset_file *entry;
    struct rb_node *iter;

    write_lock(&node->vm_lock);

    iter = node->vm_files.rb_node;
    while (likely(iter)) {
        entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
        if (tag == entry->vm_tag) {
            if (!--entry->vm_count) {
                rb_erase(&entry->vm_rb, &node->vm_files);
                kfree(entry);
            }
            break;
        } else if (tag > entry->vm_tag) {
            iter = iter->rb_right;
        } else {
            iter = iter->rb_left;
        }
    }

    write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);
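
/*
 * Example (editor's sketch): revoke access when the handle is dropped,
 * mirroring the my_obj_open() sketch above. Note that existing mappings are
 * not torn down by this call; the caller must handle that separately if
 * required.
 *
 *     static void my_obj_close(struct my_obj *obj, struct drm_file *file)
 *     {
 *             drm_vma_node_revoke(&obj->vma_node, file);
 *     }
 */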

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node to check whether @tag is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true if @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
                 struct drm_file *tag)
{
    struct drm_vma_offset_file *entry;
    struct rb_node *iter;

    read_lock(&node->vm_lock);

    iter = node->vm_files.rb_node;
    while (likely(iter)) {
        entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
        if (tag == entry->vm_tag)
            break;
        else if (tag > entry->vm_tag)
            iter = iter->rb_right;
        else
            iter = iter->rb_left;
    }

    read_unlock(&node->vm_lock);

    return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
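
/*
 * Example (editor's sketch): enforce the access list in a driver's mmap
 * path, after looking the node up as in the my_obj_lookup() sketch above.
 * Returning -EACCES here matches the behaviour described in the DOC section.
 *
 *     static int my_mmap_check(struct drm_vma_offset_node *node,
 *                              struct drm_file *file)
 *     {
 *             if (!drm_vma_node_is_allowed(node, file))
 *                     return -EACCES;
 *             return 0;
 *     }
 */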