// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device,
 * usually when userspace still has open files and other handles to its
 * resources.
 *
 * Release actions can be added with drmm_add_action(), and memory allocations
 * can be made directly with drmm_kmalloc() and related functions. Everything
 * is released on the final drm_dev_put(), in reverse order of how the release
 * actions were added and the memory was allocated since driver loading
 * started with devm_drm_dev_alloc().
 *
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver; all of these functions are fully
 * concurrency-safe. However, it is recommended to use managed resources only
 * for resources that change rarely, if ever, during the lifetime of the
 * &drm_device instance.
 */
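
/*
 * Example usage (illustrative sketch only, not compiled as part of this
 * file): a hypothetical driver embeds struct drm_device in its own device
 * structure, allocates it with devm_drm_dev_alloc() and then registers a
 * managed release action. The foo_* names and foo_drm_driver are made up
 * for illustration; devm_drm_dev_alloc() and the drmm_* calls are the real
 * API.
 *
 *	struct foo_device {
 *		struct drm_device drm;
 *		void __iomem *mmio;
 *	};
 *
 *	static void foo_disable_hw(struct drm_device *drm, void *data)
 *	{
 *		struct foo_device *foo = data;
 *
 *		writel(0, foo->mmio);
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_device *foo;
 *		int ret;
 *
 *		foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
 *					 struct foo_device, drm);
 *		if (IS_ERR(foo))
 *			return PTR_ERR(foo);
 *
 *		ret = drmm_add_action_or_reset(&foo->drm, foo_disable_hw, foo);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dev_register(&foo->drm, 0);
 *	}
 *
 * The action runs on the final drm_dev_put(), after any managed resources
 * added later have already been released (reverse order of registration).
 */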

struct drmres_node {
	struct list_head	entry;
	drmres_release_t	release;
	const char		*name;
	size_t			size;
};

struct drmres {
	struct drmres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres * alloc_dr(drmres_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}

int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void*) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

int __drmm_add_action_or_reset(struct drm_device *dev,
			       drmres_release_t action,
			       void *data, const char *name)
{
	int ret;

	ret = __drmm_add_action(dev, action, data, name);
	if (ret)
		action(dev, data);

	return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);

/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);
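
/*
 * Example usage (illustrative sketch, not compiled here): per-device state
 * allocated in a probe path, freed automatically on the final drm_dev_put().
 * The foo_* names are hypothetical; drmm_kzalloc() in <drm/drm_managed.h> is
 * a convenience wrapper that adds __GFP_ZERO.
 *
 *	struct foo_state *state;
 *
 *	state = drmm_kmalloc(&foo->drm, sizeof(*state), GFP_KERNEL | __GFP_ZERO);
 *	if (!state)
 *		return -ENOMEM;
 *	foo->state = state;
 */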

/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);
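
/*
 * Example usage (illustrative sketch, not compiled here): duplicating a name
 * string so it stays valid for as long as the drm_device. The foo_* and
 * node_name identifiers are hypothetical.
 *
 *	foo->panel_name = drmm_kstrdup(&foo->drm, node_name, GFP_KERNEL);
 *	if (!foo->panel_name)
 *		return -ENOMEM;
 */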

/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any of its related
 * functions before the final drm_dev_put() of @dev.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);
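
/*
 * Example usage (illustrative sketch, not compiled here): releasing a managed
 * allocation early, e.g. when it turns out to be unneeded after a failed
 * feature probe; without the explicit call it would be freed on the final
 * drm_dev_put(). The foo_* names are hypothetical.
 *
 *	buf = drmm_kmalloc(&foo->drm, len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	drmm_kfree(&foo->drm, buf);
 */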

static void drmm_mutex_release(struct drm_device *dev, void *res)
{
	struct mutex *lock = res;

	mutex_destroy(lock);
}

/**
 * drmm_mutex_init - &drm_device-managed mutex_init()
 * @dev: DRM device
 * @lock: lock to be initialized
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 *
 * This is a &drm_device-managed version of mutex_init(). The initialized
 * lock is automatically destroyed on the final drm_dev_put().
 */
int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
{
	mutex_init(lock);

	return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
}
EXPORT_SYMBOL(drmm_mutex_init);
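
/*
 * Example usage (illustrative sketch, not compiled here): initializing a
 * driver lock whose mutex_destroy() is handled automatically on the final
 * drm_dev_put(). The foo_* names are hypothetical.
 *
 *	ret = drmm_mutex_init(&foo->drm, &foo->hw_lock);
 *	if (ret)
 *		return ret;
 */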