// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <drm/drm_fourcc.h>

#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_gem_create.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

static u32 object_max_page_size(struct intel_memory_region **placements,
				unsigned int n_placements)
{
	u32 max_page_size = 0;
	int i;

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];

		GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
		max_page_size = max_t(u32, max_page_size, mr->min_page_size);
	}

	GEM_BUG_ON(!max_page_size);
	return max_page_size;
}
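
/*
 * Example (illustrative sketch, not a definitive recipe): object sizes are
 * later rounded up to the value returned here, so that the backing store is
 * valid for every candidate region. With a placement list mixing system
 * memory (4K minimum page size) and a local-memory region with a 64K minimum
 * page size, a 5000-byte request becomes a 64K object:
 *
 *	size = round_up(size, object_max_page_size(placements, n_placements));
 *	// object_max_page_size() returns SZ_64K here, so size becomes 65536
 */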

static int object_set_placements(struct drm_i915_gem_object *obj,
				 struct intel_memory_region **placements,
				 unsigned int n_placements)
{
	struct intel_memory_region **arr;
	unsigned int i;

	GEM_BUG_ON(!n_placements);

	/*
	 * For the common case of one memory region, skip storing an
	 * allocated array and just point at the region directly.
	 */
	if (n_placements == 1) {
		struct intel_memory_region *mr = placements[0];
		struct drm_i915_private *i915 = mr->i915;

		obj->mm.placements = &i915->mm.regions[mr->id];
		obj->mm.n_placements = 1;
	} else {
		arr = kmalloc_array(n_placements,
				    sizeof(struct intel_memory_region *),
				    GFP_KERNEL);
		if (!arr)
			return -ENOMEM;

		for (i = 0; i < n_placements; i++)
			arr[i] = placements[i];

		obj->mm.placements = arr;
		obj->mm.n_placements = n_placements;
	}

	return 0;
}

static int i915_gem_publish(struct drm_i915_gem_object *obj,
			    struct drm_file *file,
			    u64 *size_p,
			    u32 *handle_p)
{
	u64 size = obj->base.size;
	int ret;

	ret = drm_gem_handle_create(file, &obj->base, handle_p);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*size_p = size;
	return 0;
}

static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
				  struct intel_memory_region **placements,
				  unsigned int n_placements,
				  unsigned int ext_flags)
{
	struct intel_memory_region *mr = placements[0];
	struct drm_i915_gem_object *obj;
	unsigned int flags;
	int ret;

	i915_gem_flush_free_objects(i915);

	size = round_up(size, object_max_page_size(placements, n_placements));
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = object_set_placements(obj, placements, n_placements);
	if (ret)
		goto object_free;

	/*
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = I915_BO_ALLOC_USER;

	ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
	if (ret)
		goto object_free;

	GEM_BUG_ON(size != obj->base.size);

	/* Add any flag set by create_ext options */
	obj->flags |= ext_flags;

	trace_i915_gem_object_create(obj);
	return obj;

object_free:
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}

/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 *                                 DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking.  It is assumed that the set of placement regions has
 * already been verified to be valid.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements)
{
	return __i915_gem_object_create_user_ext(i915, size, placements,
						 n_placements, 0);
}
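
/*
 * Illustrative, selftest-style sketch of the helper above (an assumption of
 * how a caller might use it, not a definitive recipe): create a small object
 * backed by system memory and drop the reference again once done.
 *
 *	struct intel_memory_region *mr =
 *		intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = __i915_gem_object_create_user(i915, SZ_64K, &mr, 1);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	// ... exercise the object ...
 *	i915_gem_object_put(obj);
 */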

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;
	enum intel_memory_type mem_type;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	if (args->pitch < args->width)
		return -EINVAL;

	args->size = mul_u32_u32(args->pitch, args->height);

	mem_type = INTEL_MEMORY_SYSTEM;
	if (HAS_LMEM(to_i915(dev)))
		mem_type = INTEL_MEMORY_LOCAL;

	mr = intel_memory_region_by_type(to_i915(dev), mem_type);

	obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
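
/*
 * Illustrative userspace sketch (assumes the DRM uapi headers and an
 * already-open DRM file descriptor "fd"): a dumb-buffer request only supplies
 * width/height/bpp; the handler above computes pitch and size and returns a
 * handle.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920,
 *		.height = 1080,
 *		.bpp = 32,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) == 0)
 *		printf("handle %u pitch %u size %llu\n",
 *		       create.handle, create.pitch,
 *		       (unsigned long long)create.size);
 */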

/**
 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;

	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

	obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
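
/*
 * Illustrative userspace sketch (assumes an already-open DRM file descriptor
 * "fd"): the legacy create ioctl takes only a size and always places the
 * object in system memory; the returned handle is what later ioctls refer to.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		printf("new handle %u, size %llu\n",
 *		       create.handle, (unsigned long long)create.size);
 */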

struct create_ext {
	struct drm_i915_private *i915;
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	unsigned int n_placements;
	unsigned int placement_mask;
	unsigned long flags;
};

static void repr_placements(char *buf, size_t size,
			    struct intel_memory_region **placements,
			    int n_placements)
{
	int i;

	buf[0] = '\0';

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];
		int r;

		r = snprintf(buf, size, "\n  %s -> { class: %d, inst: %d }",
			     mr->name, mr->type, mr->instance);
		if (r >= size)
			return;

		buf += r;
		size -= r;
	}
}

static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	u32 mask;
	int i, ret = 0;

	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
	BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		if (copy_from_user(&region, uregions, sizeof(region)))
			return -EFAULT;

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	if (ext_data->n_placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	ext_data->n_placements = args->num_regions;
	for (i = 0; i < args->num_regions; i++)
		ext_data->placements[i] = placements[i];

	ext_data->placement_mask = mask;
	return 0;

out_dump:
	if (1) {
		char buf[256];

		if (ext_data->n_placements) {
			repr_placements(buf,
					sizeof(buf),
					ext_data->placements,
					ext_data->n_placements);
			drm_dbg(&i915->drm,
				"Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
	}

	return ret;
}

static int ext_set_placements(struct i915_user_extension __user *base,
			      void *data)
{
	struct drm_i915_gem_create_ext_memory_regions ext;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	return set_placements(&ext, data);
}

static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
	struct drm_i915_gem_create_ext_protected_content ext;
	struct create_ext *ext_data = data;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	if (ext.flags)
		return -EINVAL;

	if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
		return -ENODEV;

	ext_data->flags |= I915_BO_PROTECTED;

	return 0;
}
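
/*
 * Illustrative userspace sketch (assumes an already-open DRM file descriptor
 * "fd"): the protected-content extension carries no payload beyond its
 * header, so requesting a PXP-backed object is just a matter of linking the
 * extension into the create_ext chain; the handler above rejects it with
 * -ENODEV when PXP is not enabled.
 *
 *	struct drm_i915_gem_create_ext_protected_content protected = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *	};
 *	struct drm_i915_gem_create_ext create = {
 *		.size = 4096,
 *		.extensions = (__u64)(uintptr_t)&protected,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
 */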

static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
	[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
};

/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
		return -EINVAL;

	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	if (ret)
		return ret;

	if (!ext_data.n_placements) {
		ext_data.placements[0] =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
		ext_data.n_placements = 1;
	}

	if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
		if (ext_data.n_placements == 1)
			return -EINVAL;

		/*
		 * We always need to be able to spill to system memory, if we
		 * can't place in the mappable part of LMEM.
		 */
		if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
			return -EINVAL;
	} else {
		if (ext_data.n_placements > 1 ||
		    ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
			ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
	}

	obj = __i915_gem_object_create_user_ext(i915, args->size,
						ext_data.placements,
						ext_data.n_placements,
						ext_data.flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
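
/*
 * Illustrative userspace sketch (assumes an already-open DRM file descriptor
 * "fd"; region instances are device-specific and should normally be
 * discovered via the memory-region query uapi first): request an object that
 * prefers device-local memory, may spill to system memory, and must stay
 * CPU-mappable, matching the NEEDS_CPU_ACCESS checks above.
 *
 *	struct drm_i915_gem_memory_class_instance regions[] = {
 *		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions placements = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.num_regions = sizeof(regions) / sizeof(regions[0]),
 *		.regions = (__u64)(uintptr_t)regions,
 *	};
 *	struct drm_i915_gem_create_ext create = {
 *		.size = 2 * 1024 * 1024,
 *		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
 *		.extensions = (__u64)(uintptr_t)&placements,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) == 0)
 *		printf("handle %u, rounded size %llu\n",
 *		       create.handle, (unsigned long long)create.size);
 */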