/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "gt/intel_engine_user.h"
#include <uapi/drm/i915_drm.h>

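/*
 * Common helper for the two-phase query protocol: a query item with
 * length == 0 asks for the required buffer size, a length smaller than
 * total_length is rejected, and otherwise the first query_sz bytes of
 * the user buffer are copied into the kernel header at query_hdr.
 */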
static int copy_query_item(void *query_hdr, size_t query_sz,
               u32 total_length,
               struct drm_i915_query_item *query_item)
{
    if (query_item->length == 0)
        return total_length;

    if (query_item->length < total_length)
        return -EINVAL;

    if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
               query_sz))
        return -EFAULT;

    return 0;
}

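/*
 * Write a drm_i915_query_topology_info header followed by the slice,
 * subslice and EU masks into the user buffer described by query_item.
 * Returns the total number of bytes written on success.
 */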
static int fill_topology_info(const struct sseu_dev_info *sseu,
                  struct drm_i915_query_item *query_item,
                  intel_sseu_ss_mask_t subslice_mask)
{
    struct drm_i915_query_topology_info topo;
    u32 slice_length, subslice_length, eu_length, total_length;
    int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
    int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
    int ret;

    BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

    if (sseu->max_slices == 0)
        return -ENODEV;

    slice_length = sizeof(sseu->slice_mask);
    subslice_length = sseu->max_slices * ss_stride;
    eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
    total_length = sizeof(topo) + slice_length + subslice_length +
               eu_length;

    ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);

    if (ret != 0)
        return ret;

    memset(&topo, 0, sizeof(topo));
    topo.max_slices = sseu->max_slices;
    topo.max_subslices = sseu->max_subslices;
    topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

    topo.subslice_offset = slice_length;
    topo.subslice_stride = ss_stride;
    topo.eu_offset = slice_length + subslice_length;
    topo.eu_stride = eu_stride;

    if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
             &topo, sizeof(topo)))
        return -EFAULT;

    if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
             &sseu->slice_mask, slice_length))
        return -EFAULT;

    if (intel_sseu_copy_ssmask_to_user(u64_to_user_ptr(query_item->data_ptr +
                               sizeof(topo) + slice_length),
                       sseu))
        return -EFAULT;

    if (intel_sseu_copy_eumask_to_user(u64_to_user_ptr(query_item->data_ptr +
                               sizeof(topo) +
                               slice_length + subslice_length),
                       sseu))
        return -EFAULT;

    return total_length;
}

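/*
 * DRM_I915_QUERY_TOPOLOGY_INFO: report the slice/subslice/EU topology
 * of the primary GT. No flags are defined for this query.
 */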
static int query_topology_info(struct drm_i915_private *dev_priv,
                   struct drm_i915_query_item *query_item)
{
    const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;

    if (query_item->flags != 0)
        return -EINVAL;

    return fill_topology_info(sseu, query_item, sseu->subslice_mask);
}

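/*
 * DRM_I915_QUERY_GEOMETRY_SUBSLICES: report the geometry subslice mask
 * for the render engine selected by the i915_engine_class_instance
 * packed into query_item->flags. Only supported on graphics IP 12.50+.
 */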
static int query_geometry_subslices(struct drm_i915_private *i915,
                    struct drm_i915_query_item *query_item)
{
    const struct sseu_dev_info *sseu;
    struct intel_engine_cs *engine;
    struct i915_engine_class_instance classinstance;

    if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
        return -ENODEV;

    classinstance = *((struct i915_engine_class_instance *)&query_item->flags);

    engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
                      (u8)classinstance.engine_instance);

    if (!engine)
        return -EINVAL;

    if (engine->class != RENDER_CLASS)
        return -EINVAL;

    sseu = &engine->gt->info.sseu;

    return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
}

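/*
 * DRM_I915_QUERY_ENGINE_INFO: fill the user buffer with one
 * drm_i915_engine_info per user-visible (uabi) engine, including its
 * class/instance, capabilities and logical instance.
 */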
static int
query_engine_info(struct drm_i915_private *i915,
          struct drm_i915_query_item *query_item)
{
    struct drm_i915_query_engine_info __user *query_ptr =
                u64_to_user_ptr(query_item->data_ptr);
    struct drm_i915_engine_info __user *info_ptr;
    struct drm_i915_query_engine_info query;
    struct drm_i915_engine_info info = { };
    unsigned int num_uabi_engines = 0;
    struct intel_engine_cs *engine;
    int len, ret;

    if (query_item->flags)
        return -EINVAL;

    for_each_uabi_engine(engine, i915)
        num_uabi_engines++;

    len = struct_size(query_ptr, engines, num_uabi_engines);

    ret = copy_query_item(&query, sizeof(query), len, query_item);
    if (ret != 0)
        return ret;

    if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
        query.rsvd[2])
        return -EINVAL;

    info_ptr = &query_ptr->engines[0];

    for_each_uabi_engine(engine, i915) {
        info.engine.engine_class = engine->uabi_class;
        info.engine.engine_instance = engine->uabi_instance;
        info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
        info.capabilities = engine->uabi_capabilities;
        info.logical_instance = ilog2(engine->logical_mask);

        if (copy_to_user(info_ptr, &info, sizeof(info)))
            return -EFAULT;

        query.num_engines++;
        info_ptr++;
    }

    if (copy_to_user(query_ptr, &query, sizeof(query)))
        return -EFAULT;

    return len;
}

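/*
 * Validate that the user-provided register count is either zero (the
 * caller only wants the number of registers reported back) or large
 * enough to hold all of the kernel's registers.
 */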
static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
                            u64 user_regs_ptr,
                            u32 kernel_n_regs)
{
    /*
     * We'll just report the number of registers, and won't copy the
     * registers themselves.
     */
    if (user_n_regs == 0)
        return 0;

    if (user_n_regs < kernel_n_regs)
        return -EINVAL;

    return 0;
}

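/*
 * Copy the (offset, value) pairs of an OA config's register list to
 * userspace, or just report the register count when the caller passed
 * *user_n_regs == 0. Each register occupies two u32 slots in the user
 * buffer.
 */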
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
                        u32 kernel_n_regs,
                        u64 user_regs_ptr,
                        u32 *user_n_regs)
{
    u32 __user *p = u64_to_user_ptr(user_regs_ptr);
    u32 r;

    if (*user_n_regs == 0) {
        *user_n_regs = kernel_n_regs;
        return 0;
    }

    *user_n_regs = kernel_n_regs;

    if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
        return -EFAULT;

    for (r = 0; r < kernel_n_regs; r++, p += 2) {
        unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
                p, Efault);
        unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
    }
    user_write_access_end();
    return 0;
Efault:
    user_write_access_end();
    return -EFAULT;
}

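/*
 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID / _FOR_ID: look up an OA
 * config either by UUID string or by numeric ID and copy its boolean,
 * flex and mux register lists (or just their counts) back into the
 * drm_i915_perf_oa_config that follows the query header in the user
 * buffer.
 */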
static int query_perf_config_data(struct drm_i915_private *i915,
                  struct drm_i915_query_item *query_item,
                  bool use_uuid)
{
    struct drm_i915_query_perf_config __user *user_query_config_ptr =
        u64_to_user_ptr(query_item->data_ptr);
    struct drm_i915_perf_oa_config __user *user_config_ptr =
        u64_to_user_ptr(query_item->data_ptr +
                sizeof(struct drm_i915_query_perf_config));
    struct drm_i915_perf_oa_config user_config;
    struct i915_perf *perf = &i915->perf;
    struct i915_oa_config *oa_config;
    char uuid[UUID_STRING_LEN + 1];
    u64 config_id;
    u32 flags, total_size;
    int ret;

    if (!perf->i915)
        return -ENODEV;

    total_size =
        sizeof(struct drm_i915_query_perf_config) +
        sizeof(struct drm_i915_perf_oa_config);

    if (query_item->length == 0)
        return total_size;

    if (query_item->length < total_size) {
        DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
              query_item->length, total_size);
        return -EINVAL;
    }

    if (get_user(flags, &user_query_config_ptr->flags))
        return -EFAULT;

    if (flags != 0)
        return -EINVAL;

    if (use_uuid) {
        struct i915_oa_config *tmp;
        int id;

        BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

        memset(&uuid, 0, sizeof(uuid));
        if (copy_from_user(uuid, user_query_config_ptr->uuid,
                     sizeof(user_query_config_ptr->uuid)))
            return -EFAULT;

        oa_config = NULL;
        rcu_read_lock();
        idr_for_each_entry(&perf->metrics_idr, tmp, id) {
            if (!strcmp(tmp->uuid, uuid)) {
                oa_config = i915_oa_config_get(tmp);
                break;
            }
        }
        rcu_read_unlock();
    } else {
        if (get_user(config_id, &user_query_config_ptr->config))
            return -EFAULT;

        oa_config = i915_perf_get_oa_config(perf, config_id);
    }
    if (!oa_config)
        return -ENOENT;

    if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
        ret = -EFAULT;
        goto out;
    }

    ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
                               user_config.boolean_regs_ptr,
                               oa_config->b_counter_regs_len);
    if (ret)
        goto out;

    ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
                               user_config.flex_regs_ptr,
                               oa_config->flex_regs_len);
    if (ret)
        goto out;

    ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
                               user_config.mux_regs_ptr,
                               oa_config->mux_regs_len);
    if (ret)
        goto out;

    ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
                           oa_config->b_counter_regs_len,
                           user_config.boolean_regs_ptr,
                           &user_config.n_boolean_regs);
    if (ret)
        goto out;

    ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
                           oa_config->flex_regs_len,
                           user_config.flex_regs_ptr,
                           &user_config.n_flex_regs);
    if (ret)
        goto out;

    ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
                           oa_config->mux_regs_len,
                           user_config.mux_regs_ptr,
                           &user_config.n_mux_regs);
    if (ret)
        goto out;

    memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

    if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
        ret = -EFAULT;
        goto out;
    }

    ret = total_size;

out:
    i915_oa_config_put(oa_config);
    return ret;
}

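/* Size of a perf config list: the query header plus one u64 ID per config. */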
static size_t sizeof_perf_config_list(size_t count)
{
    return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

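/*
 * Count the registered OA configs, starting at 1 to account for the
 * reserved test config, and return the list size needed to hold them.
 */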
static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
    struct i915_oa_config *tmp;
    size_t i;
    int id;

    i = 1;
    rcu_read_lock();
    idr_for_each_entry(&perf->metrics_idr, tmp, id)
        i++;
    rcu_read_unlock();

    return sizeof_perf_config_list(i);
}

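/*
 * DRM_I915_QUERY_PERF_CONFIG_LIST: copy the IDs of all registered OA
 * configs to userspace. Since configs can be added between the sizing
 * pass and the copy, the ID array is built in a krealloc loop that
 * retries until the allocation is large enough for everything found
 * under the RCU read lock.
 */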
static int query_perf_config_list(struct drm_i915_private *i915,
                  struct drm_i915_query_item *query_item)
{
    struct drm_i915_query_perf_config __user *user_query_config_ptr =
        u64_to_user_ptr(query_item->data_ptr);
    struct i915_perf *perf = &i915->perf;
    u64 *oa_config_ids = NULL;
    int alloc, n_configs;
    u32 flags;
    int ret;

    if (!perf->i915)
        return -ENODEV;

    if (query_item->length == 0)
        return sizeof_perf_metrics(perf);

    if (get_user(flags, &user_query_config_ptr->flags))
        return -EFAULT;

    if (flags != 0)
        return -EINVAL;

    n_configs = 1;
    do {
        struct i915_oa_config *tmp;
        u64 *ids;
        int id;

        ids = krealloc(oa_config_ids,
                   n_configs * sizeof(*oa_config_ids),
                   GFP_KERNEL);
        if (!ids) {
            /* krealloc() does not free the old buffer on failure. */
            kfree(oa_config_ids);
            return -ENOMEM;
        }

        alloc = fetch_and_zero(&n_configs);

        ids[n_configs++] = 1ull; /* reserved for test_config */
        rcu_read_lock();
        idr_for_each_entry(&perf->metrics_idr, tmp, id) {
            if (n_configs < alloc)
                ids[n_configs] = id;
            n_configs++;
        }
        rcu_read_unlock();

        oa_config_ids = ids;
    } while (n_configs > alloc);

    if (query_item->length < sizeof_perf_config_list(n_configs)) {
        DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
              query_item->length,
              sizeof_perf_config_list(n_configs));
        kfree(oa_config_ids);
        return -EINVAL;
    }

    if (put_user(n_configs, &user_query_config_ptr->config)) {
        kfree(oa_config_ids);
        return -EFAULT;
    }

    ret = copy_to_user(user_query_config_ptr + 1,
               oa_config_ids,
               n_configs * sizeof(*oa_config_ids));
    kfree(oa_config_ids);
    if (ret)
        return -EFAULT;

    return sizeof_perf_config_list(n_configs);
}

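/* Dispatch DRM_I915_QUERY_PERF_CONFIG on the sub-query selected by flags. */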
static int query_perf_config(struct drm_i915_private *i915,
                 struct drm_i915_query_item *query_item)
{
    switch (query_item->flags) {
    case DRM_I915_QUERY_PERF_CONFIG_LIST:
        return query_perf_config_list(i915, query_item);
    case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
        return query_perf_config_data(i915, query_item, true);
    case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
        return query_perf_config_data(i915, query_item, false);
    default:
        return -EINVAL;
    }
}

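/*
 * DRM_I915_QUERY_MEMORY_REGIONS: describe each public memory region
 * (class, instance, probed and CPU-visible sizes). Unallocated sizes
 * are only reported to perfmon-capable callers; everyone else just
 * sees the probed sizes echoed back.
 */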
static int query_memregion_info(struct drm_i915_private *i915,
                struct drm_i915_query_item *query_item)
{
    struct drm_i915_query_memory_regions __user *query_ptr =
        u64_to_user_ptr(query_item->data_ptr);
    struct drm_i915_memory_region_info __user *info_ptr =
        &query_ptr->regions[0];
    struct drm_i915_memory_region_info info = { };
    struct drm_i915_query_memory_regions query;
    struct intel_memory_region *mr;
    u32 total_length;
    int ret, id, i;

    if (query_item->flags != 0)
        return -EINVAL;

    total_length = sizeof(query);
    for_each_memory_region(mr, i915, id) {
        if (mr->private)
            continue;

        total_length += sizeof(info);
    }

    ret = copy_query_item(&query, sizeof(query), total_length, query_item);
    if (ret != 0)
        return ret;

    if (query.num_regions)
        return -EINVAL;

    for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
        if (query.rsvd[i])
            return -EINVAL;
    }

    for_each_memory_region(mr, i915, id) {
        if (mr->private)
            continue;

        info.region.memory_class = mr->type;
        info.region.memory_instance = mr->instance;
        info.probed_size = mr->total;

        if (mr->type == INTEL_MEMORY_LOCAL)
            info.probed_cpu_visible_size = mr->io_size;
        else
            info.probed_cpu_visible_size = mr->total;

        if (perfmon_capable()) {
            intel_memory_region_avail(mr,
                          &info.unallocated_size,
                          &info.unallocated_cpu_visible_size);
        } else {
            info.unallocated_size = info.probed_size;
            info.unallocated_cpu_visible_size =
                info.probed_cpu_visible_size;
        }

        if (__copy_to_user(info_ptr, &info, sizeof(info)))
            return -EFAULT;

        query.num_regions++;
        info_ptr++;
    }

    if (__copy_to_user(query_ptr, &query, sizeof(query)))
        return -EFAULT;

    return total_length;
}

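/*
 * DRM_I915_QUERY_HWCONFIG_BLOB: copy out the opaque hardware
 * configuration table, if the platform provides one.
 */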
static int query_hwconfig_blob(struct drm_i915_private *i915,
                   struct drm_i915_query_item *query_item)
{
    struct intel_gt *gt = to_gt(i915);
    struct intel_hwconfig *hwconfig = &gt->info.hwconfig;

    if (!hwconfig->size || !hwconfig->ptr)
        return -ENODEV;

    if (query_item->length == 0)
        return hwconfig->size;

    if (query_item->length < hwconfig->size)
        return -EINVAL;

    if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
             hwconfig->ptr, hwconfig->size))
        return -EFAULT;

    return hwconfig->size;
}

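/*
 * Query dispatch table. The order must match the DRM_I915_QUERY_*
 * uapi values: entry n handles query_id n + 1.
 */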
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
                    struct drm_i915_query_item *query_item) = {
    query_topology_info,
    query_engine_info,
    query_perf_config,
    query_memregion_info,
    query_hwconfig_blob,
    query_geometry_subslices,
};

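/*
 * DRM_IOCTL_I915_QUERY: iterate over the user's query items, dispatch
 * each one through i915_query_funcs (with the index hardened against
 * speculation), and write the result (a total length or a negative
 * error code) back to the item's length field.
 */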
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct drm_i915_query *args = data;
    struct drm_i915_query_item __user *user_item_ptr =
        u64_to_user_ptr(args->items_ptr);
    u32 i;

    if (args->flags != 0)
        return -EINVAL;

    for (i = 0; i < args->num_items; i++, user_item_ptr++) {
        struct drm_i915_query_item item;
        unsigned long func_idx;
        int ret;

        if (copy_from_user(&item, user_item_ptr, sizeof(item)))
            return -EFAULT;

        if (item.query_id == 0)
            return -EINVAL;

        if (overflows_type(item.query_id - 1, unsigned long))
            return -EINVAL;

        func_idx = item.query_id - 1;

        ret = -EINVAL;
        if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
            func_idx = array_index_nospec(func_idx,
                              ARRAY_SIZE(i915_query_funcs));
            ret = i915_query_funcs[func_idx](dev_priv, &item);
        }

        /* Only write the length back to userspace if it differs. */
        if (ret != item.length && put_user(ret, &user_item_ptr->length))
            return -EFAULT;
    }

    return 0;
}