/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

/* Acquire the runtime-pm reference that backs this wakeref */
static void rpm_get(struct intel_wakeref *wf)
{
    wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

/* Release the runtime-pm reference; one must have been held */
static void rpm_put(struct intel_wakeref *wf)
{
    intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

    intel_runtime_pm_put(wf->rpm, wakeref);
    INTEL_WAKEREF_BUG_ON(!wakeref);
}

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
    /*
     * Treat get/put as different subclasses, as we may need to run
     * the put callback from under the shrinker and do not want to
     * cross-contaminate that callback with any extra work performed
     * upon acquiring the wakeref.
     */
    mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
    if (!atomic_read(&wf->count)) {
        int err;

        rpm_get(wf);

        err = wf->ops->get(wf);
        if (unlikely(err)) {
            rpm_put(wf);
            mutex_unlock(&wf->mutex);
            return err;
        }

        smp_mb__before_atomic(); /* release wf->count */
    }
    atomic_inc(&wf->count);
    mutex_unlock(&wf->mutex);

    INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
    return 0;
}

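/*
 * Illustrative sketch, not part of the original file: callers are expected to
 * take a wakeref through an inline fast path that only drops into
 * __intel_wakeref_get_first() when the count may be making the 0 -> 1
 * transition. The helper below is a hypothetical rendering of that fast path
 * (the real wrapper lives in intel_wakeref.h and may differ in detail).
 */
static inline int example_wakeref_get(struct intel_wakeref *wf)
{
    /* Fast path: the wakeref is already held, just bump the count. */
    if (atomic_inc_not_zero(&wf->count))
        return 0;

    /* Slow path: take wf->mutex and wake the hardware if required. */
    return __intel_wakeref_get_first(wf);
}
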
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
    INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
    if (unlikely(!atomic_dec_and_test(&wf->count)))
        goto unlock;

    /* ops->put() must reschedule its own release on error/deferral */
    if (likely(!wf->ops->put(wf))) {
        rpm_put(wf);
        wake_up_var(&wf->wakeref);
    }

unlock:
    mutex_unlock(&wf->mutex);
}

void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
    INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));

    /* Assume we are not in process context and so cannot sleep. */
    if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
        mod_delayed_work(system_wq, &wf->work,
                         FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
        return;
    }

    ____intel_wakeref_put_last(wf);
}

static void __intel_wakeref_put_work(struct work_struct *wrk)
{
    struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);

    /*
     * Drop the deferred reference; only the final put needs wf->mutex,
     * so skip the lock entirely if other references remain.
     */
    if (atomic_add_unless(&wf->count, -1, 1))
        return;

    mutex_lock(&wf->mutex);
    ____intel_wakeref_put_last(wf);
}

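/*
 * Illustrative sketch, not part of the original file: releasing a wakeref is
 * likewise split into a lock-free fast path and the slow path above. Only a
 * potentially-final put reaches __intel_wakeref_put_last(); with
 * INTEL_WAKEREF_PUT_ASYNC set (or the mutex contended) the release is punted
 * to __intel_wakeref_put_work(). The helper below is a hypothetical rendering
 * of that fast path (the real wrapper lives in intel_wakeref.h).
 */
static inline void example_wakeref_put(struct intel_wakeref *wf,
                                       unsigned long flags)
{
    /* Fast path: decrement unless we would drop the last reference. */
    if (atomic_add_unless(&wf->count, -1, 1))
        return;

    /* Slow path: possibly park the hardware, perhaps asynchronously. */
    __intel_wakeref_put_last(wf, flags);
}
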
void __intel_wakeref_init(struct intel_wakeref *wf,
                          struct intel_runtime_pm *rpm,
                          const struct intel_wakeref_ops *ops,
                          struct intel_wakeref_lockclass *key)
{
    wf->rpm = rpm;
    wf->ops = ops;

    __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
    atomic_set(&wf->count, 0);
    wf->wakeref = 0;

    INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
    lockdep_init_map(&wf->work.work.lockdep_map,
                     "wakeref.work", &key->work, 0);
}

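/*
 * Illustrative sketch, not part of the original file: a hypothetical user of
 * this interface provides get/put callbacks that bring its own hardware block
 * up and down, then initialises the wakeref once during setup. All names
 * below (example_block, example_block_ops, ...) are invented for the example;
 * real users such as the GT and engine power management code differ in
 * detail.
 */
struct example_block {
    struct intel_wakeref wakeref;
};

static int example_block_get(struct intel_wakeref *wf)
{
    /* Power up / unpark the block here; return 0 on success. */
    return 0;
}

static int example_block_put(struct intel_wakeref *wf)
{
    /* Park the block; return 0 once the release may complete now. */
    return 0;
}

static const struct intel_wakeref_ops example_block_ops = {
    .get = example_block_get,
    .put = example_block_put,
};

static void example_block_init(struct example_block *blk,
                               struct intel_runtime_pm *rpm)
{
    /* A single static lock class suffices for this singleton example. */
    static struct intel_wakeref_lockclass example_key;

    __intel_wakeref_init(&blk->wakeref, rpm, &example_block_ops, &example_key);
}
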
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
    int err;

    might_sleep();

    err = wait_var_event_killable(&wf->wakeref,
                                  !intel_wakeref_is_active(wf));
    if (err)
        return err;

    intel_wakeref_unlock_wait(wf);
    return 0;
}

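/*
 * Illustrative sketch, not part of the original file: a suspend or teardown
 * path might use intel_wakeref_wait_for_idle() to block (killably) until any
 * outstanding wakeref has been released and its release has completed.
 * example_block is the hypothetical user introduced above.
 */
static int example_block_flush(struct example_block *blk)
{
    return intel_wakeref_wait_for_idle(&blk->wakeref);
}
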
/* Timer expiry: drop our reference and, if it was the last, release the device */
static void wakeref_auto_timeout(struct timer_list *t)
{
    struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
    intel_wakeref_t wakeref;
    unsigned long flags;

    if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
        return;

    wakeref = fetch_and_zero(&wf->wakeref);
    spin_unlock_irqrestore(&wf->lock, flags);

    intel_runtime_pm_put(wf->rpm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
                             struct intel_runtime_pm *rpm)
{
    spin_lock_init(&wf->lock);
    timer_setup(&wf->timer, wakeref_auto_timeout, 0);
    refcount_set(&wf->count, 0);
    wf->wakeref = 0;
    wf->rpm = rpm;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
    unsigned long flags;

    if (!timeout) {
        if (del_timer_sync(&wf->timer))
            wakeref_auto_timeout(&wf->timer);
        return;
    }

    /* We must only extend an already active wakeref */
    assert_rpm_wakelock_held(wf->rpm);

    if (!refcount_inc_not_zero(&wf->count)) {
        spin_lock_irqsave(&wf->lock, flags);
        if (!refcount_inc_not_zero(&wf->count)) {
            INTEL_WAKEREF_BUG_ON(wf->wakeref);
            wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
            refcount_set(&wf->count, 1);
        }
        spin_unlock_irqrestore(&wf->lock, flags);
    }

    /*
     * If we extend a pending timer, we will only get a single timer
     * callback and so need to cancel the local inc by running the
     * elided callback to keep the wf->count balanced.
     */
    if (mod_timer(&wf->timer, jiffies + timeout))
        wakeref_auto_timeout(&wf->timer);
}

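/*
 * Illustrative sketch, not part of the original file: a typical use of the
 * auto wakeref is to keep the device awake for a grace period after touching
 * the hardware (e.g. after servicing a userspace fault), rather than letting
 * runtime suspend kick in immediately. The struct and field names are
 * invented for the example; with_intel_runtime_pm() is assumed from
 * intel_runtime_pm.h, and the caller must hold a runtime-pm wakeref when
 * passing a non-zero timeout, as asserted above.
 */
struct example_device {
    struct intel_runtime_pm *rpm;
    struct intel_wakeref_auto grace_wakeref;
};

static void example_handle_access(struct example_device *ed)
{
    intel_wakeref_t wakeref;

    with_intel_runtime_pm(ed->rpm, wakeref) {
        /* ... touch the hardware ... */

        /* Keep the device awake for another 250ms after we are done. */
        intel_wakeref_auto(&ed->grace_wakeref,
                           msecs_to_jiffies(250));
    }
    /* At teardown, intel_wakeref_auto_fini(&ed->grace_wakeref) cancels this. */
}
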
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
    intel_wakeref_auto(wf, 0);
    INTEL_WAKEREF_BUG_ON(wf->wakeref);
}