// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

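/*
 * Per-engine sysfs directory: a kobject paired with the engine it
 * describes, so attribute callbacks can recover the engine from the
 * embedded kobject via container_of().
 */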
struct kobj_engine {
    struct kobject base;
    struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
    return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

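/*
 * Hedged usage sketch: these read-only files live under the DRM card's
 * device directory; the card and engine names below are examples only.
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/name
 *   rcs0
 *   $ cat /sys/class/drm/card0/engine/rcs0/mmio_base
 *   0x2000
 */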
static const char * const vcs_caps[] = {
    [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
    [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
    [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
    /* Trim off the trailing space and replace with a newline */
    if (len > PAGE_SIZE)
        len = PAGE_SIZE;
    if (len > 0)
        buf[len - 1] = '\n';

    return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
        unsigned long caps, char *buf, bool show_unknown)
{
    const char * const *repr;
    int count, n;
    ssize_t len;

    switch (engine->class) {
    case VIDEO_DECODE_CLASS:
        repr = vcs_caps;
        count = ARRAY_SIZE(vcs_caps);
        break;

    case VIDEO_ENHANCEMENT_CLASS:
        repr = vecs_caps;
        count = ARRAY_SIZE(vecs_caps);
        break;

    default:
        repr = NULL;
        count = 0;
        break;
    }
    GEM_BUG_ON(count > BITS_PER_LONG);

    len = 0;
    for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
        if (n >= count || !repr[n]) {
            if (GEM_WARN_ON(show_unknown))
                len += snprintf(buf + len, PAGE_SIZE - len,
                        "[%x] ", n);
        } else {
            len += snprintf(buf + len, PAGE_SIZE - len,
                    "%s ", repr[n]);
        }
        if (GEM_WARN_ON(len >= PAGE_SIZE))
            break;
    }
    return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
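
/*
 * capabilities decodes the engine's uabi_capabilities mask into names,
 * warning on any set bit without a name; known_capabilities feeds an
 * all-ones mask with show_unknown=false, so it lists every capability
 * name this kernel knows for the engine's class.
 */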

static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
           const char *buf, size_t count)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);
    unsigned long long duration;
    int err;

    /*
     * When waiting for a request, if it is currently being executed
     * on the GPU, we busywait for a short while before sleeping. The
     * premise is that most requests are short, and if one is already
     * executing then there is a good chance that it will complete
     * before we can set up the interrupt handler and go to sleep.
     * We try to offset the cost of going to sleep by first spinning
     * on the request -- if it completes in less time than it would take
     * to go to sleep, process the interrupt and return to the client,
     * we have saved the client some latency, albeit at the cost
     * of spinning on an expensive CPU core.
     *
     * While we try to avoid waiting at all for a request that is unlikely
     * to complete, deciding how long it is worth spinning is an
     * arbitrary decision: trading off power vs latency.
     */

    err = kstrtoull(buf, 0, &duration);
    if (err)
        return err;

    if (duration > jiffies_to_nsecs(2))
        return -EINVAL;

    WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

    return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
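
/*
 * Hedged example (paths are illustrative): writing 0 effectively
 * disables the busywait, always sleeping on the interrupt instead:
 *
 *   $ echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 */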

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
        const char *buf, size_t count)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);
    unsigned long long duration;
    int err;

    /*
     * Execlists uses a scheduling quantum (a timeslice) to alternate
     * execution between ready-to-run contexts of equal priority. This
     * ensures that all users (though only if they are of equal importance)
     * have the opportunity to run and prevents livelocks where contexts
     * may have implicit ordering due to userspace semaphores.
     */

    err = kstrtoull(buf, 0, &duration);
    if (err)
        return err;

    if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
        return -EINVAL;

    WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

    if (execlists_active(&engine->execlists))
        set_timer_ms(&engine->execlists.timer, duration);

    return count;
}
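
/*
 * Note: if timeslicing is currently active, the store above also
 * reprograms the running timer, so a new duration takes effect
 * immediately rather than at the next slice.
 */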

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
       const char *buf, size_t count)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);
    unsigned long long duration;
    int err;

    /*
     * Allowing ourselves to sleep before a GPU reset after disabling
     * submission, even for a few milliseconds, gives an innocent context
     * the opportunity to clear the GPU before the reset occurs. However,
     * how long to sleep depends on the typical non-preemptible duration
     * (a similar problem to determining the ideal preempt-reset timeout
     * or even the heartbeat interval).
     */

    err = kstrtoull(buf, 0, &duration);
    if (err)
        return err;

    if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
        return -EINVAL;

    WRITE_ONCE(engine->props.stop_timeout_ms, duration);
    return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
              const char *buf, size_t count)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);
    unsigned long long timeout;
    int err;

    /*
     * After initialising a preemption request, we give the current
     * resident a small amount of time to vacate the GPU. The preemption
     * request is for a higher priority context and should be immediate to
     * maintain high quality of service (and avoid priority inversion).
     * However, the preemption granularity of the GPU can be quite coarse
     * and so we need a compromise.
     */

    err = kstrtoull(buf, 0, &timeout);
    if (err)
        return err;

    if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
        return -EINVAL;

    WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

    if (READ_ONCE(engine->execlists.pending[0]))
        set_timer_ms(&engine->execlists.preempt, timeout);

    return count;
}
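
/*
 * As with the timeslice timer, an in-flight preemption (pending[0] set)
 * has its timer reprogrammed so the new timeout applies at once.
 */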

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
             char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
            char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
        const char *buf, size_t count)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);
    unsigned long long delay;
    int err;

    /*
     * We monitor the health of the system via periodic heartbeat pulses.
     * The pulses also provide the opportunity to perform garbage
     * collection.  However, we interpret an incomplete pulse (a missed
     * heartbeat) as an indication that the system is no longer responsive,
     * i.e. hung, and perform an engine or full GPU reset. Given that the
     * preemption granularity can be very coarse on a system, the optimal
     * value for any workload is unknowable!
     */

    err = kstrtoull(buf, 0, &delay);
    if (err)
        return err;

    if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
        return -EINVAL;

    err = intel_engine_set_heartbeat(engine, delay);
    if (err)
        return err;

    return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
    struct intel_engine_cs *engine = kobj_to_engine(kobj);

    return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
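
/*
 * Unlike the other stores, heartbeat_store defers the update to
 * intel_engine_set_heartbeat(), which must (re)schedule the heartbeat
 * worker; writing 0 is expected to disable the heartbeat entirely.
 */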

static void kobj_engine_release(struct kobject *kobj)
{
    kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
    .release = kobj_engine_release,
    .sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
    struct kobj_engine *ke;

    ke = kzalloc(sizeof(*ke), GFP_KERNEL);
    if (!ke)
        return NULL;

    kobject_init(&ke->base, &kobj_engine_type);
    ke->engine = engine;

    if (kobject_add(&ke->base, dir, "%s", engine->name)) {
        kobject_put(&ke->base);
        return NULL;
    }

    /* xfer ownership to sysfs tree */
    return &ke->base;
}

static void add_defaults(struct kobj_engine *parent)
{
    static const struct attribute *files[] = {
        &max_spin_def.attr,
        &stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
        &heartbeat_interval_def.attr,
#endif
        NULL
    };
    struct kobj_engine *ke;

    ke = kzalloc(sizeof(*ke), GFP_KERNEL);
    if (!ke)
        return;

    kobject_init(&ke->base, &kobj_engine_type);
    ke->engine = parent->engine;

    if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
        kobject_put(&ke->base);
        return;
    }

    if (sysfs_create_files(&ke->base, files))
        return;

    if (intel_engine_has_timeslices(ke->engine) &&
        sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
        return;

    if (intel_engine_has_preempt_reset(ke->engine) &&
        sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
        return;
}
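
/*
 * The hidden .defaults subdirectory mirrors the tunables with their
 * read-only boot-time values, so userspace can restore a parameter it
 * has changed, e.g. (illustrative paths, from the engine's directory):
 *
 *   $ cat .defaults/stop_timeout_ms > stop_timeout_ms
 */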

void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
    static const struct attribute *files[] = {
        &name_attr.attr,
        &class_attr.attr,
        &inst_attr.attr,
        &mmio_attr.attr,
        &caps_attr.attr,
        &all_caps_attr.attr,
        &max_spin_attr.attr,
        &stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
        &heartbeat_interval_attr.attr,
#endif
        NULL
    };

    struct device *kdev = i915->drm.primary->kdev;
    struct intel_engine_cs *engine;
    struct kobject *dir;

    dir = kobject_create_and_add("engine", &kdev->kobj);
    if (!dir)
        return;

    for_each_uabi_engine(engine, i915) {
        struct kobject *kobj;

        kobj = kobj_engine(dir, engine);
        if (!kobj)
            goto err_engine;

        if (sysfs_create_files(kobj, files))
            goto err_object;

        if (intel_engine_has_timeslices(engine) &&
            sysfs_create_file(kobj, &timeslice_duration_attr.attr))
            goto err_engine;

        if (intel_engine_has_preempt_reset(engine) &&
            sysfs_create_file(kobj, &preempt_timeout_attr.attr))
            goto err_engine;

        add_defaults(container_of(kobj, struct kobj_engine, base));

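        /*
         * The if (0) block below is unreachable except via the goto
         * labels above: a compact idiom for sharing error unwind
         * within the loop body.
         */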
        if (0) {
err_object:
            kobject_put(kobj);
err_engine:
            dev_err(kdev, "Failed to add sysfs engine '%s'\n",
                engine->name);
            break;
        }
    }
}