/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>
#include <linux/string_helpers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

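/*
 * Test wrapper around i915_active: the kref tracks the lifetime of this
 * object across the active/retire callbacks, and @retired records whether
 * the retirement callback has fired.
 */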
struct live_active {
    struct i915_active base;
    struct kref ref;
    bool retired;
};

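/*
 * Reference-counting helpers for the test object; the final put releases
 * the embedded i915_active and frees the allocation.
 */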
static void __live_get(struct live_active *active)
{
    kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
    i915_active_fini(&active->base);
    kfree(active);
}

static void __live_release(struct kref *ref)
{
    struct live_active *active = container_of(ref, typeof(*active), ref);

    __live_free(active);
}

static void __live_put(struct live_active *active)
{
    kref_put(&active->ref, __live_release);
}

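/*
 * i915_active callbacks: a reference is taken when the i915_active first
 * becomes active, and dropped again from the retire callback, which also
 * flags that retirement was observed.
 */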
static int __live_active(struct i915_active *base)
{
    struct live_active *active = container_of(base, typeof(*active), base);

    __live_get(active);
    return 0;
}

static void __live_retire(struct i915_active *base)
{
    struct live_active *active = container_of(base, typeof(*active), base);

    active->retired = true;
    __live_put(active);
}

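/* Allocate a test object and bind the callbacks above to its i915_active. */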
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
    struct live_active *active;

    active = kzalloc(sizeof(*active), GFP_KERNEL);
    if (!active)
        return NULL;

    kref_init(&active->ref);
    i915_active_init(&active->base, __live_active, __live_retire, 0);

    return active;
}

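/*
 * Submit a kernel request on every uabi engine, with each submission gated
 * behind a heap-allocated sw_fence so that no request can complete early,
 * and track every request in the i915_active. The fence is only committed
 * on the way out, after checking that the tracking took hold.
 */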
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
    struct intel_engine_cs *engine;
    struct i915_sw_fence *submit;
    struct live_active *active;
    unsigned int count = 0;
    int err = 0;

    active = __live_alloc(i915);
    if (!active)
        return ERR_PTR(-ENOMEM);

    submit = heap_fence_create(GFP_KERNEL);
    if (!submit) {
        kfree(active);
        return ERR_PTR(-ENOMEM);
    }

    err = i915_active_acquire(&active->base);
    if (err)
        goto out;

    for_each_uabi_engine(engine, i915) {
        struct i915_request *rq;

        rq = intel_engine_create_kernel_request(engine);
        if (IS_ERR(rq)) {
            err = PTR_ERR(rq);
            break;
        }

        err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                               submit,
                                               GFP_KERNEL);
        if (err >= 0)
            err = i915_active_add_request(&active->base, rq);
        i915_request_add(rq);
        if (err) {
            pr_err("Failed to track active ref!\n");
            break;
        }

        count++;
    }

    i915_active_release(&active->base);
    if (READ_ONCE(active->retired) && count) {
        pr_err("i915_active retired before submission!\n");
        err = -EINVAL;
    }
    if (atomic_read(&active->base.count) != count) {
        pr_err("i915_active not tracking all requests, found %d, expected %d\n",
               atomic_read(&active->base.count), count);
        err = -EINVAL;
    }

out:
    i915_sw_fence_commit(submit);
    heap_fence_put(submit);
    if (err) {
        __live_put(active);
        active = ERR_PTR(err);
    }

    return active;
}

static int live_active_wait(void *arg)
{
    struct drm_i915_private *i915 = arg;
    struct live_active *active;
    int err = 0;

    /* Check that we get a callback when requests retire upon waiting */

    active = __live_active_setup(i915);
    if (IS_ERR(active))
        return PTR_ERR(active);

    __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
    if (!READ_ONCE(active->retired)) {
        struct drm_printer p = drm_err_printer(__func__);

        pr_err("i915_active not retired after waiting!\n");
        i915_active_print(&active->base, &p);

        err = -EINVAL;
    }

    __live_put(active);

    if (igt_flush_test(i915))
        err = -EIO;

    return err;
}

static int live_active_retire(void *arg)
{
    struct drm_i915_private *i915 = arg;
    struct live_active *active;
    int err = 0;

    /* Check that we get a callback when requests are indirectly retired */

    active = __live_active_setup(i915);
    if (IS_ERR(active))
        return PTR_ERR(active);

    /* waits for & retires all requests */
    if (igt_flush_test(i915))
        err = -EIO;

    if (!READ_ONCE(active->retired)) {
        struct drm_printer p = drm_err_printer(__func__);

        pr_err("i915_active not retired after flushing!\n");
        i915_active_print(&active->base, &p);

        err = -EINVAL;
    }

    __live_put(active);

    return err;
}

static int live_active_barrier(void *arg)
{
    struct drm_i915_private *i915 = arg;
    struct intel_engine_cs *engine;
    struct live_active *active;
    int err = 0;

    /* Check that we get a retire callback after flushing engine barriers */

    active = __live_alloc(i915);
    if (!active)
        return -ENOMEM;

    err = i915_active_acquire(&active->base);
    if (err)
        goto out;

    for_each_uabi_engine(engine, i915) {
        err = i915_active_acquire_preallocate_barrier(&active->base,
                                                      engine);
        if (err)
            break;

        i915_active_acquire_barrier(&active->base);
    }

    i915_active_release(&active->base);
    if (err)
        goto out;

    __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
    if (!READ_ONCE(active->retired)) {
        pr_err("i915_active not retired after flushing barriers!\n");
        err = -EINVAL;
    }

out:
    __live_put(active);

    if (igt_flush_test(i915))
        err = -EIO;

    return err;
}

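/*
 * Entry point for the live i915_active selftests. Assuming the usual
 * selftest plumbing (CONFIG_DRM_I915_SELFTEST), these are typically run
 * through the driver's live-selftest machinery rather than called
 * directly; the wedged check below skips them when the GPU is unusable.
 */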
int i915_active_live_selftests(struct drm_i915_private *i915)
{
    static const struct i915_subtest tests[] = {
        SUBTEST(live_active_wait),
        SUBTEST(live_active_retire),
        SUBTEST(live_active_barrier),
    };

    if (intel_gt_is_wedged(to_gt(i915)))
        return 0;

    return i915_subtests(tests, i915);
}

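/*
 * Return the engine a node is acting as a barrier for, or NULL if it is an
 * ordinary tracking node. The barrier status is re-checked after the read
 * barrier to serialise against a concurrent add_active_barriers().
 */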
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
    struct intel_engine_cs *engine;

    if (!is_barrier(&it->base))
        return NULL;

    engine = __barrier_to_engine(it);
    smp_rmb(); /* serialise with add_active_barriers */
    if (!is_barrier(&it->base))
        return NULL;

    return engine;
}

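/*
 * Dump the state of an i915_active for debugging: its callbacks, reference
 * count and, while it can be kept busy, each tracked node, shown either as
 * an engine barrier or as the timeline of the awaited fence.
 */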
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
    drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
    drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
    drm_printf(m, "\tpreallocated barriers? %s\n",
               str_yes_no(!llist_empty(&ref->preallocated_barriers)));

    if (i915_active_acquire_if_busy(ref)) {
        struct active_node *it, *n;

        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
            struct intel_engine_cs *engine;

            engine = node_to_barrier(it);
            if (engine) {
                drm_printf(m, "\tbarrier: %s\n", engine->name);
                continue;
            }

            if (i915_active_fence_isset(&it->base)) {
                drm_printf(m,
                           "\ttimeline: %llx\n", it->timeline);
                continue;
            }
        }

        i915_active_release(ref);
    }
}

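/* Wait for any critical section currently holding @lock to complete. */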
static void spin_unlock_wait(spinlock_t *lock)
{
    spin_lock_irq(lock);
    spin_unlock_irq(lock);
}

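/*
 * Steal the fence from a tracked slot, unlink its signal callback and drop
 * the slot's contribution to the active count. The fence is expected to
 * have already signaled, as asserted below.
 */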
static void active_flush(struct i915_active *ref,
                         struct i915_active_fence *active)
{
    struct dma_fence *fence;

    fence = xchg(__active_fence_slot(active), NULL);
    if (!fence)
        return;

    spin_lock_irq(fence->lock);
    __list_del_entry(&active->cb.node);
    spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
    atomic_dec(&ref->count);

    GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
}

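/*
 * Flush the fence callbacks from every tracked slot, then wait for anyone
 * holding the tree lock and for the queued retirement work, so that no
 * callbacks are still executing against this i915_active afterwards.
 */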
void i915_active_unlock_wait(struct i915_active *ref)
{
    if (i915_active_acquire_if_busy(ref)) {
        struct active_node *it, *n;

        /* Wait for all active callbacks */
        rcu_read_lock();
        active_flush(ref, &ref->excl);
        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
            active_flush(ref, &it->base);
        rcu_read_unlock();

        i915_active_release(ref);
    }

    /* And wait for the retire callback */
    spin_unlock_wait(&ref->tree_lock);

    /* ... which may have been on a thread instead */
    flush_work(&ref->work);
}