// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"

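/*
 * All host1x syncpoint fences share this one spinlock, passed to
 * dma_fence_init() below. The dma_fence core only uses it to serialize
 * per-fence signalling state, so a single global lock suffices here.
 */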
static DEFINE_SPINLOCK(lock);

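/*
 * A dma_fence backed by a host1x syncpoint reaching a threshold value.
 *
 * 'signaling' guards against the interrupt path and the timeout worker
 * both completing the fence: whichever gets there first claims it via
 * atomic_xchg() and the loser backs off. 'waiter' is handed over to the
 * intr framework once signalling is enabled.
 */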
struct host1x_syncpt_fence {
    struct dma_fence base;

    atomic_t signaling;

    struct host1x_syncpt *sp;
    u32 threshold;

    struct host1x_waitlist *waiter;
    void *waiter_ref;

    struct delayed_work timeout_work;
};

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
    return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
    return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
    return container_of(f, struct host1x_syncpt_fence, base);
}

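/*
 * .enable_signaling() is invoked by the dma_fence core the first time
 * someone waits on the fence or installs a callback. If it returns
 * false, the core treats the fence as already signalled.
 */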
static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
    struct host1x_syncpt_fence *sf = to_host1x_fence(f);
    int err;

    if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
        return false;

    dma_fence_get(f);

    /*
     * The dma_fence framework requires the fence driver to keep a
     * reference to any fences for which 'enable_signaling' has been
     * called (and that have not been signalled).
     *
     * We provide a userspace API to create arbitrary syncpoint fences,
     * so we cannot normally guarantee that all fences get signalled.
     * As such, set up a timeout so that long-lasting fences will get
     * reaped eventually.
     */
    schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));

    err = host1x_intr_add_action(sf->sp->host, sf->sp, sf->threshold,
                     HOST1X_INTR_ACTION_SIGNAL_FENCE, f,
                     sf->waiter, &sf->waiter_ref);
    if (err) {
        cancel_delayed_work_sync(&sf->timeout_work);
        dma_fence_put(f);
        return false;
    }

    /* The intr framework now owns the waiter. */
    sf->waiter = NULL;

    /*
     * The fence may get signalled at any time after the above call,
     * so all state used by the signalling path must be initialized
     * before it.
     */

    return true;
}

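/*
 * If signalling was never enabled, the waiter allocated in
 * host1x_fence_create() was never handed to the intr framework and
 * must be freed here.
 */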
static void host1x_syncpt_fence_release(struct dma_fence *f)
{
    struct host1x_syncpt_fence *sf = to_host1x_fence(f);

    /* kfree() tolerates NULL, so no need to check sf->waiter first. */
    kfree(sf->waiter);

    dma_fence_free(f);
}

const struct dma_fence_ops host1x_syncpt_fence_ops = {
    .get_driver_name = host1x_syncpt_fence_get_driver_name,
    .get_timeline_name = host1x_syncpt_fence_get_timeline_name,
    .enable_signaling = host1x_syncpt_fence_enable_signaling,
    .release = host1x_syncpt_fence_release,
};

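/*
 * Called by the intr framework (via HOST1X_INTR_ACTION_SIGNAL_FENCE)
 * when the syncpoint reaches the fence's threshold. Races with
 * do_fence_timeout() through 'signaling'.
 */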
void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
    if (atomic_xchg(&f->signaling, 1))
        return;

    /*
     * Cancel the pending timeout work. If the worker races with us,
     * it will fail to claim 'f->signaling' above and bail out.
     */
    cancel_delayed_work_sync(&f->timeout_work);

    host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, false);

    dma_fence_signal(&f->base);
    dma_fence_put(&f->base);
}

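/*
 * Runs if the fence has not been signalled within the 30 second window
 * armed in host1x_syncpt_fence_enable_signaling(), and completes the
 * fence with -ETIMEDOUT so that waiters are not stuck forever.
 */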
static void do_fence_timeout(struct work_struct *work)
{
    struct delayed_work *dwork = to_delayed_work(work);
    struct host1x_syncpt_fence *f =
        container_of(dwork, struct host1x_syncpt_fence, timeout_work);

    if (atomic_xchg(&f->signaling, 1))
        return;

    /*
     * Remove the waiter from the intr framework. flush=true makes the
     * call wait until any concurrently executing handler for this
     * waiter has finished, which is safe here in process context (the
     * interrupt path above passes flush=false for the same reason).
     */
    host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, true);

    dma_fence_set_error(&f->base, -ETIMEDOUT);
    dma_fence_signal(&f->base);
    dma_fence_put(&f->base);
}

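/**
 * host1x_fence_create() - create a dma_fence for a syncpoint threshold
 * @sp: syncpoint to monitor
 * @threshold: value @sp must reach for the fence to signal
 *
 * Returns a new fence, or an ERR_PTR()-encoded error on allocation
 * failure. Once signalling has been enabled, the fence either signals
 * when @sp reaches @threshold or is completed with -ETIMEDOUT after
 * 30 seconds.
 */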
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
{
    struct host1x_syncpt_fence *fence;

    fence = kzalloc(sizeof(*fence), GFP_KERNEL);
    if (!fence)
        return ERR_PTR(-ENOMEM);

    fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
    if (!fence->waiter) {
        kfree(fence);
        return ERR_PTR(-ENOMEM);
    }

    fence->sp = sp;
    fence->threshold = threshold;

    dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &lock,
               dma_fence_context_alloc(1), 0);

    INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

    return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
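/*
 * Illustrative usage sketch (not part of this file): a caller that
 * already holds a struct host1x_syncpt could wait on a threshold
 * through the generic dma_fence interface like so:
 *
 *     struct dma_fence *fence = host1x_fence_create(sp, threshold);
 *
 *     if (IS_ERR(fence))
 *         return PTR_ERR(fence);
 *
 *     dma_fence_wait(fence, true);
 *     dma_fence_put(fence);
 */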