Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Tegra host1x Interrupt Management
0004  *
0005  * Copyright (c) 2010-2013, NVIDIA Corporation.
0006  */
0007 
0008 #include <linux/clk.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/slab.h>
0011 #include <linux/irq.h>
0012 
0013 #include <trace/events/host1x.h>
0014 #include "channel.h"
0015 #include "dev.h"
0016 #include "fence.h"
0017 #include "intr.h"
0018 
0019 /* Wait list management */
0020 
/*
 * Lifetime states of a waiter.
 *
 * The numeric ordering is load-bearing: remove_completed_waiters()
 * advances a waiter with atomic_inc_return(), so PENDING -> REMOVED and
 * CANCELLED -> HANDLED rely on these enumerators being consecutive.
 */
enum waitlist_state {
    WLS_PENDING,   /* queued on a syncpt wait list, threshold not reached */
    WLS_REMOVED,   /* threshold reached; moved onto a completed[] list */
    WLS_CANCELLED, /* cancelled via host1x_intr_put_ref() before completion */
    WLS_HANDLED    /* action has run (or cancellation finished) */
};
0027 
0028 static void waiter_release(struct kref *kref)
0029 {
0030     kfree(container_of(kref, struct host1x_waitlist, refcount));
0031 }
0032 
/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
                struct list_head *queue)
{
    struct host1x_waitlist *pos;
    u32 thresh = waiter->thresh;

    /*
     * Walk backwards so the waiter is inserted after the last entry
     * whose threshold is <= ours; the signed difference keeps the
     * ordering correct across 32-bit syncpt value wraparound.
     */
    list_for_each_entry_reverse(pos, queue, list)
        if ((s32)(pos->thresh - thresh) <= 0) {
            list_add(&waiter->list, &pos->list);
            return false;
        }

    /* No smaller-or-equal entry: new queue head, i.e. new minimum threshold. */
    list_add(&waiter->list, queue);
    return true;
}
0052 
/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 *
 * Caller holds the syncpt's intr.lock. @sync is the current syncpt
 * value; the queue is sorted by threshold (see add_waiter_to_queue()),
 * so the scan can stop at the first unfinished waiter.
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
            struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
    struct list_head *dest;
    struct host1x_waitlist *waiter, *next, *prev;

    list_for_each_entry_safe(waiter, next, head, list) {
        /* Signed compare handles syncpt value wraparound. */
        if ((s32)(waiter->thresh - sync) > 0)
            break;

        dest = completed + waiter->action;

        /* consolidate submit cleanups */
        if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
            !list_empty(dest)) {
            prev = list_entry(dest->prev,
                      struct host1x_waitlist, list);
            if (prev->data == waiter->data) {
                /* Same channel as the previous completion:
                 * fold into its count; dest == NULL marks
                 * this waiter as consolidated away. */
                prev->count++;
                dest = NULL;
            }
        }

        /* PENDING->REMOVED or CANCELLED->HANDLED */
        if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
            /* Cancelled (now HANDLED) or consolidated: unlink
             * and drop the wait queue's reference here. */
            list_del(&waiter->list);
            kref_put(&waiter->refcount, waiter_release);
        } else
            list_move_tail(&waiter->list, dest);
    }
}
0088 
0089 static void reset_threshold_interrupt(struct host1x *host,
0090                       struct list_head *head,
0091                       unsigned int id)
0092 {
0093     u32 thresh =
0094         list_first_entry(head, struct host1x_waitlist, list)->thresh;
0095 
0096     host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
0097     host1x_hw_intr_enable_syncpt_intr(host, id);
0098 }
0099 
/*
 * Completion action for channel submissions: runs CDMA cleanup for the
 * channel stored in waiter->data and emits a trace event. waiter->count
 * carries the number of consolidated completions (see
 * remove_completed_waiters()).
 */
static void action_submit_complete(struct host1x_waitlist *waiter)
{
    struct host1x_channel *channel = waiter->data;

    host1x_cdma_update(&channel->cdma);

    /*  Add nr_completed to trace */
    trace_host1x_channel_submit_complete(dev_name(channel->dev),
                         waiter->count, waiter->thresh);
}
0110 
0111 static void action_wakeup(struct host1x_waitlist *waiter)
0112 {
0113     wait_queue_head_t *wq = waiter->data;
0114 
0115     wake_up(wq);
0116 }
0117 
0118 static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
0119 {
0120     wait_queue_head_t *wq = waiter->data;
0121 
0122     wake_up_interruptible(wq);
0123 }
0124 
0125 static void action_signal_fence(struct host1x_waitlist *waiter)
0126 {
0127     struct host1x_syncpt_fence *f = waiter->data;
0128 
0129     host1x_fence_signal(f);
0130 }
0131 
typedef void (*action_handler)(struct host1x_waitlist *waiter);

/*
 * Dispatch table indexed by waiter->action (enum host1x_intr_action,
 * declared in intr.h). The entry order must match the enum declaration
 * order — presumably SUBMIT_COMPLETE, WAKEUP, WAKEUP_INTERRUPTIBLE,
 * SIGNAL_FENCE; verify against intr.h when changing either.
 */
static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
    action_submit_complete,
    action_wakeup,
    action_wakeup_interruptible,
    action_signal_fence,
};
0140 
/*
 * Invoke the registered action for every waiter gathered by
 * remove_completed_waiters(), one completed[] list per action type,
 * then drop the wait queue's reference on each waiter. Called after
 * the intr lock has been released (see process_wait_list()).
 */
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
    struct list_head *head = completed;
    unsigned int i;

    for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
        action_handler handler = action_handlers[i];
        struct host1x_waitlist *waiter, *next;

        list_for_each_entry_safe(waiter, next, head, list) {
            list_del(&waiter->list);
            handler(waiter);
            /* Only REMOVED waiters may reach this point; marking
             * HANDLED lets host1x_intr_put_ref() stop waiting. */
            WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
                WLS_REMOVED);
            kref_put(&waiter->refcount, waiter_release);
        }
    }
}
0159 
/*
 * Remove & handle all waiters that have completed for the given syncpt
 *
 * Returns non-zero if the wait queue ended up empty (interrupt was
 * disabled), zero if waiters remain (interrupt re-armed for the next
 * threshold).
 */
static int process_wait_list(struct host1x *host,
                 struct host1x_syncpt *syncpt,
                 u32 threshold)
{
    struct list_head completed[HOST1X_INTR_ACTION_COUNT];
    unsigned int i;
    int empty;

    /* One gather list per action type. */
    for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
        INIT_LIST_HEAD(completed + i);

    spin_lock(&syncpt->intr.lock);

    remove_completed_waiters(&syncpt->intr.wait_head, threshold,
                 completed);

    /* Disable the interrupt if nobody is waiting anymore, otherwise
     * re-arm it for the new smallest outstanding threshold. */
    empty = list_empty(&syncpt->intr.wait_head);
    if (empty)
        host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
    else
        reset_threshold_interrupt(host, &syncpt->intr.wait_head,
                      syncpt->id);

    spin_unlock(&syncpt->intr.lock);

    /* Run the actions outside the lock. */
    run_handlers(completed);

    return empty;
}
0192 
0193 /*
0194  * Sync point threshold interrupt service thread function
0195  * Handles sync point threshold triggers, in thread context
0196  */
0197 
0198 static void syncpt_thresh_work(struct work_struct *work)
0199 {
0200     struct host1x_syncpt_intr *syncpt_intr =
0201         container_of(work, struct host1x_syncpt_intr, work);
0202     struct host1x_syncpt *syncpt =
0203         container_of(syncpt_intr, struct host1x_syncpt, intr);
0204     unsigned int id = syncpt->id;
0205     struct host1x *host = syncpt->host;
0206 
0207     (void)process_wait_list(host, syncpt,
0208                 host1x_syncpt_load(host->syncpt + id));
0209 }
0210 
/*
 * Register an action to run once @syncpt reaches @thresh.
 *
 * @host:   host1x instance
 * @syncpt: sync point to wait on
 * @thresh: syncpt value that triggers the action
 * @action: which handler to run (index into action_handlers[])
 * @data:   handler-specific payload stored in waiter->data
 * @waiter: caller-allocated waitlist entry; ownership passes to the
 *          wait queue (freed via kref when handled)
 * @ref:    if non-NULL, receives an extra counted reference to the
 *          waiter, to be released with host1x_intr_put_ref()
 *
 * Returns 0 on success, -EINVAL if @waiter is NULL.
 */
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
               u32 thresh, enum host1x_intr_action action,
               void *data, struct host1x_waitlist *waiter,
               void **ref)
{
    int queue_was_empty;

    if (waiter == NULL) {
        pr_warn("%s: NULL waiter\n", __func__);
        return -EINVAL;
    }

    /* initialize a new waiter */
    INIT_LIST_HEAD(&waiter->list);
    kref_init(&waiter->refcount);
    /* second reference for the caller's cancellation handle */
    if (ref)
        kref_get(&waiter->refcount);
    waiter->thresh = thresh;
    waiter->action = action;
    atomic_set(&waiter->state, WLS_PENDING);
    waiter->data = data;
    waiter->count = 1;

    spin_lock(&syncpt->intr.lock);

    queue_was_empty = list_empty(&syncpt->intr.wait_head);

    if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
        /* added at head of list - new threshold value */
        host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);

        /* added as first waiter - enable interrupt */
        if (queue_was_empty)
            host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
    }

    /* publish the handle while still under the lock */
    if (ref)
        *ref = waiter;

    spin_unlock(&syncpt->intr.lock);

    return 0;
}
0254 
/*
 * Release the reference obtained via the @ref argument of
 * host1x_intr_add_action(), cancelling the action if it has not run yet.
 * With @flush set, also busy-wait (yielding) until any concurrently
 * executing handler has finished with the waiter.
 */
void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
             bool flush)
{
    struct host1x_waitlist *waiter = ref;
    struct host1x_syncpt *syncpt;

    /* If the waiter hasn't completed yet, mark it CANCELLED so the
     * interrupt path advances it to HANDLED instead of running it. */
    atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);

    syncpt = host->syncpt + id;

    /* If it is still CANCELLED (the interrupt path hasn't reaped it),
     * unlink it ourselves and drop the wait queue's reference. */
    spin_lock(&syncpt->intr.lock);
    if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
        WLS_CANCELLED) {
        list_del(&waiter->list);
        kref_put(&waiter->refcount, waiter_release);
    }
    spin_unlock(&syncpt->intr.lock);

    if (flush) {
        /* Wait until any concurrently executing handler has finished. */
        while (atomic_read(&waiter->state) != WLS_HANDLED)
            schedule();
    }

    /* Drop the caller's reference. */
    kref_put(&waiter->refcount, waiter_release);
}
0281 
0282 int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
0283 {
0284     unsigned int id;
0285     u32 nb_pts = host1x_syncpt_nb_pts(host);
0286 
0287     mutex_init(&host->intr_mutex);
0288     host->intr_syncpt_irq = irq_sync;
0289 
0290     for (id = 0; id < nb_pts; ++id) {
0291         struct host1x_syncpt *syncpt = host->syncpt + id;
0292 
0293         spin_lock_init(&syncpt->intr.lock);
0294         INIT_LIST_HEAD(&syncpt->intr.wait_head);
0295         snprintf(syncpt->intr.thresh_irq_name,
0296              sizeof(syncpt->intr.thresh_irq_name),
0297              "host1x_sp_%02u", id);
0298     }
0299 
0300     return 0;
0301 }
0302 
/*
 * Intentionally empty: nothing is torn down here. NOTE(review):
 * interrupt disabling and waiter cleanup appear to happen in
 * host1x_intr_stop(); presumably this stub exists to mirror
 * host1x_intr_init() in the driver lifecycle — confirm with callers.
 */
void host1x_intr_deinit(struct host1x *host)
{
}
0306 
0307 void host1x_intr_start(struct host1x *host)
0308 {
0309     u32 hz = clk_get_rate(host->clk);
0310     int err;
0311 
0312     mutex_lock(&host->intr_mutex);
0313     err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
0314                         syncpt_thresh_work);
0315     if (err) {
0316         mutex_unlock(&host->intr_mutex);
0317         return;
0318     }
0319     mutex_unlock(&host->intr_mutex);
0320 }
0321 
/*
 * Disable all syncpt interrupts, reap cancelled waiters, and free the
 * syncpt IRQ. If any waiter is still live (not CANCELLED), bail out
 * with a warning and leave the IRQ allocated.
 */
void host1x_intr_stop(struct host1x *host)
{
    unsigned int id;
    struct host1x_syncpt *syncpt = host->syncpt;
    u32 nb_pts = host1x_syncpt_nb_pts(host);

    mutex_lock(&host->intr_mutex);

    host1x_hw_intr_disable_all_syncpt_intrs(host);

    for (id = 0; id < nb_pts; ++id) {
        struct host1x_waitlist *waiter, *next;

        /* Reap waiters that were cancelled but not yet unlinked
         * by the interrupt path. */
        list_for_each_entry_safe(waiter, next,
            &syncpt[id].intr.wait_head, list) {
            if (atomic_cmpxchg(&waiter->state,
                WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
                list_del(&waiter->list);
                kref_put(&waiter->refcount, waiter_release);
            }
        }

        /* Anything left is a live (non-cancelled) waiter; we
         * cannot free the IRQ while it may still fire for them. */
        if (!list_empty(&syncpt[id].intr.wait_head)) {
            /* output diagnostics */
            mutex_unlock(&host->intr_mutex);
            pr_warn("%s cannot stop syncpt intr id=%u\n",
                __func__, id);
            return;
        }
    }

    host1x_hw_intr_free_syncpt_irq(host);

    mutex_unlock(&host->intr_mutex);
}