// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */
#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device,
 * usually when userspace has some open files and other handles to resources
 * still open.
 *
 * Release actions can be added with drmm_add_action(), memory allocations can
 * be done directly with drmm_kmalloc() with automatic release when the final
 * reference is dropped.
 */
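
/*
 * A minimal usage sketch (illustrative only: struct foo_device, foo_probe()
 * and foo_hw_teardown() are hypothetical). Instead of open-coding cleanup in
 * an unload path, a driver registers a release action that runs automatically
 * on the final drm_dev_put():
 *
 *	static void foo_release(struct drm_device *drm, void *data)
 *	{
 *		struct foo_device *foo = data;
 *
 *		foo_hw_teardown(foo);
 *	}
 *
 *	static int foo_probe(struct foo_device *foo)
 *	{
 *		return drmm_add_action_or_reset(&foo->drm, foo_release, foo);
 *	}
 */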

struct drmres_node {
	struct list_head entry;
	drmres_release_t release;
	const char *name;
	size_t size;
};

struct drmres {
	struct drmres_node node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres * alloc_dr(drmres_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%zu bytes)\n",
		       dr, dr->node.name, dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%zu bytes)\n",
		       dr, dr->node.name, dr->node.size);
}

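/**
 * drmm_add_final_kfree - add release action for the final kfree()
 * @dev: DRM device
 * @container: pointer to the kmalloc allocation containing @dev
 *
 * The allocation holding the struct &drm_device itself cannot be managed with
 * drmm_kmalloc(), since it must exist before the managed infrastructure is
 * set up. This function registers the containing allocation instead; it is
 * kfree()'d at the very end of the device's release, after all other release
 * actions have been processed. @container must fully enclose @dev, which the
 * WARN_ONs below sanity-check.
 */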
void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}

int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void *) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

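/*
 * Note: if adding the release action fails, @action is called immediately on
 * @data, so callers don't need a separate error path to undo a partially
 * constructed resource.
 */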
int __drmm_add_action_or_reset(struct drm_device *dev,
			       drmres_release_t action,
			       void *data, const char *name)
{
	int ret;

	ret = __drmm_add_action(dev, action, data, name);
	if (ret)
		action(dev, data);

	return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);

/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);
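
/*
 * A minimal usage sketch (illustrative only: struct foo_state and foo_init()
 * are hypothetical). The allocation is tied to the lifetime of the
 * &drm_device, so the driver needs no matching kfree() in its teardown path:
 *
 *	static int foo_init(struct drm_device *drm)
 *	{
 *		struct foo_state *state;
 *
 *		state = drmm_kmalloc(drm, sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 */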

/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);

/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any other
 * &drm_device managed memory allocation function.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);

static void drmm_mutex_release(struct drm_device *dev, void *res)
{
	struct mutex *lock = res;

	mutex_destroy(lock);
}

/**
 * drmm_mutex_init - &drm_device-managed mutex_init()
 * @dev: DRM device
 * @lock: lock to be initialized
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 *
 * This is a &drm_device-managed mutex_init(). The initialized lock is
 * automatically destroyed on the final drm_dev_put().
 */
int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
{
	mutex_init(lock);

	return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
}
EXPORT_SYMBOL(drmm_mutex_init);
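
/*
 * A minimal usage sketch (illustrative only: struct foo_device is
 * hypothetical). The matching mutex_destroy() is registered as a managed
 * release action above, so the driver never calls it explicitly:
 *
 *	struct foo_device {
 *		struct drm_device drm;
 *		struct mutex lock;
 *	};
 *
 *	static int foo_init(struct foo_device *foo)
 *	{
 *		return drmm_mutex_init(&foo->drm, &foo->lock);
 *	}
 */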