Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: MIT */
0002 /*
0003  * Copyright © 2019 Intel Corporation
0004  */
0005 
0006 #ifndef INTEL_ENGINE_PM_H
0007 #define INTEL_ENGINE_PM_H
0008 
0009 #include "i915_drv.h"
0010 #include "i915_request.h"
0011 #include "intel_engine_types.h"
0012 #include "intel_wakeref.h"
0013 #include "intel_gt_pm.h"
0014 
0015 static inline bool
0016 intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
0017 {
0018     return intel_wakeref_is_active(&engine->wakeref);
0019 }
0020 
0021 static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
0022 {
0023     __intel_wakeref_get(&engine->wakeref);
0024 }
0025 
0026 static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
0027 {
0028     intel_wakeref_get(&engine->wakeref);
0029 }
0030 
0031 static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
0032 {
0033     return intel_wakeref_get_if_active(&engine->wakeref);
0034 }
0035 
0036 static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
0037 {
0038     if (!intel_engine_is_virtual(engine)) {
0039         intel_wakeref_might_get(&engine->wakeref);
0040     } else {
0041         struct intel_gt *gt = engine->gt;
0042         struct intel_engine_cs *tengine;
0043         intel_engine_mask_t tmp, mask = engine->mask;
0044 
0045         for_each_engine_masked(tengine, gt, mask, tmp)
0046             intel_wakeref_might_get(&tengine->wakeref);
0047     }
0048     intel_gt_pm_might_get(engine->gt);
0049 }
0050 
0051 static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
0052 {
0053     intel_wakeref_put(&engine->wakeref);
0054 }
0055 
0056 static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
0057 {
0058     intel_wakeref_put_async(&engine->wakeref);
0059 }
0060 
0061 static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
0062                          unsigned long delay)
0063 {
0064     intel_wakeref_put_delay(&engine->wakeref, delay);
0065 }
0066 
0067 static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
0068 {
0069     intel_wakeref_unlock_wait(&engine->wakeref);
0070 }
0071 
0072 static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
0073 {
0074     if (!intel_engine_is_virtual(engine)) {
0075         intel_wakeref_might_put(&engine->wakeref);
0076     } else {
0077         struct intel_gt *gt = engine->gt;
0078         struct intel_engine_cs *tengine;
0079         intel_engine_mask_t tmp, mask = engine->mask;
0080 
0081         for_each_engine_masked(tengine, gt, mask, tmp)
0082             intel_wakeref_might_put(&tengine->wakeref);
0083     }
0084     intel_gt_pm_might_put(engine->gt);
0085 }
0086 
0087 static inline struct i915_request *
0088 intel_engine_create_kernel_request(struct intel_engine_cs *engine)
0089 {
0090     struct i915_request *rq;
0091 
0092     /*
0093      * The engine->kernel_context is special as it is used inside
0094      * the engine-pm barrier (see __engine_park()), circumventing
0095      * the usual mutexes and relying on the engine-pm barrier
0096      * instead. So whenever we use the engine->kernel_context
0097      * outside of the barrier, we must manually handle the
0098      * engine wakeref to serialise with the use inside.
0099      */
0100     intel_engine_pm_get(engine);
0101     rq = i915_request_create(engine->kernel_context);
0102     intel_engine_pm_put(engine);
0103 
0104     return rq;
0105 }
0106 
/* One-time initialisation of the engine's PM state (defined in intel_engine_pm.c). */
void intel_engine_init__pm(struct intel_engine_cs *engine);

/*
 * NOTE(review): presumably resets the engine's pinned contexts (e.g. the
 * kernel context) after a GPU reset — confirm against the definition.
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);
0110 
0111 #endif /* INTEL_ENGINE_PM_H */