Back to home page

OSCL-LXR

 
 

    


// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, TFU done, or CSD done interrupt, we
 * need to signal the fence for that job so that the scheduler can
 * queue up the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */
0015 
0016 #include <linux/platform_device.h>
0017 
0018 #include "v3d_drv.h"
0019 #include "v3d_regs.h"
0020 #include "v3d_trace.h"
0021 
0022 #define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |  \
0023                  V3D_INT_FLDONE |   \
0024                  V3D_INT_FRDONE |   \
0025                  V3D_INT_CSDDONE |  \
0026                  V3D_INT_GMPV))
0027 
0028 #define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |   \
0029                 V3D_HUB_INT_MMU_PTI |   \
0030                 V3D_HUB_INT_MMU_CAP |   \
0031                 V3D_HUB_INT_TFUC))
0032 
0033 static irqreturn_t
0034 v3d_hub_irq(int irq, void *arg);
0035 
0036 static void
0037 v3d_overflow_mem_work(struct work_struct *work)
0038 {
0039     struct v3d_dev *v3d =
0040         container_of(work, struct v3d_dev, overflow_mem_work);
0041     struct drm_device *dev = &v3d->drm;
0042     struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
0043     struct drm_gem_object *obj;
0044     unsigned long irqflags;
0045 
0046     if (IS_ERR(bo)) {
0047         DRM_ERROR("Couldn't allocate binner overflow mem\n");
0048         return;
0049     }
0050     obj = &bo->base.base;
0051 
0052     /* We lost a race, and our work task came in after the bin job
0053      * completed and exited.  This can happen because the HW
0054      * signals OOM before it's fully OOM, so the binner might just
0055      * barely complete.
0056      *
0057      * If we lose the race and our work task comes in after a new
0058      * bin job got scheduled, that's fine.  We'll just give them
0059      * some binner pool anyway.
0060      */
0061     spin_lock_irqsave(&v3d->job_lock, irqflags);
0062     if (!v3d->bin_job) {
0063         spin_unlock_irqrestore(&v3d->job_lock, irqflags);
0064         goto out;
0065     }
0066 
0067     drm_gem_object_get(obj);
0068     list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
0069     spin_unlock_irqrestore(&v3d->job_lock, irqflags);
0070 
0071     V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
0072     V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
0073 
0074 out:
0075     drm_gem_object_put(obj);
0076 }
0077 
0078 static irqreturn_t
0079 v3d_irq(int irq, void *arg)
0080 {
0081     struct v3d_dev *v3d = arg;
0082     u32 intsts;
0083     irqreturn_t status = IRQ_NONE;
0084 
0085     intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);
0086 
0087     /* Acknowledge the interrupts we're handling here. */
0088     V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);
0089 
0090     if (intsts & V3D_INT_OUTOMEM) {
0091         /* Note that the OOM status is edge signaled, so the
0092          * interrupt won't happen again until the we actually
0093          * add more memory.  Also, as of V3D 4.1, FLDONE won't
0094          * be reported until any OOM state has been cleared.
0095          */
0096         schedule_work(&v3d->overflow_mem_work);
0097         status = IRQ_HANDLED;
0098     }
0099 
0100     if (intsts & V3D_INT_FLDONE) {
0101         struct v3d_fence *fence =
0102             to_v3d_fence(v3d->bin_job->base.irq_fence);
0103 
0104         trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
0105         dma_fence_signal(&fence->base);
0106         status = IRQ_HANDLED;
0107     }
0108 
0109     if (intsts & V3D_INT_FRDONE) {
0110         struct v3d_fence *fence =
0111             to_v3d_fence(v3d->render_job->base.irq_fence);
0112 
0113         trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
0114         dma_fence_signal(&fence->base);
0115         status = IRQ_HANDLED;
0116     }
0117 
0118     if (intsts & V3D_INT_CSDDONE) {
0119         struct v3d_fence *fence =
0120             to_v3d_fence(v3d->csd_job->base.irq_fence);
0121 
0122         trace_v3d_csd_irq(&v3d->drm, fence->seqno);
0123         dma_fence_signal(&fence->base);
0124         status = IRQ_HANDLED;
0125     }
0126 
0127     /* We shouldn't be triggering these if we have GMP in
0128      * always-allowed mode.
0129      */
0130     if (intsts & V3D_INT_GMPV)
0131         dev_err(v3d->drm.dev, "GMP violation\n");
0132 
0133     /* V3D 4.2 wires the hub and core IRQs together, so if we &
0134      * didn't see the common one then check hub for MMU IRQs.
0135      */
0136     if (v3d->single_irq_line && status == IRQ_NONE)
0137         return v3d_hub_irq(irq, arg);
0138 
0139     return status;
0140 }
0141 
0142 static irqreturn_t
0143 v3d_hub_irq(int irq, void *arg)
0144 {
0145     struct v3d_dev *v3d = arg;
0146     u32 intsts;
0147     irqreturn_t status = IRQ_NONE;
0148 
0149     intsts = V3D_READ(V3D_HUB_INT_STS);
0150 
0151     /* Acknowledge the interrupts we're handling here. */
0152     V3D_WRITE(V3D_HUB_INT_CLR, intsts);
0153 
0154     if (intsts & V3D_HUB_INT_TFUC) {
0155         struct v3d_fence *fence =
0156             to_v3d_fence(v3d->tfu_job->base.irq_fence);
0157 
0158         trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
0159         dma_fence_signal(&fence->base);
0160         status = IRQ_HANDLED;
0161     }
0162 
0163     if (intsts & (V3D_HUB_INT_MMU_WRV |
0164               V3D_HUB_INT_MMU_PTI |
0165               V3D_HUB_INT_MMU_CAP)) {
0166         u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
0167         u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
0168                 (v3d->va_width - 32));
0169         static const char *const v3d41_axi_ids[] = {
0170             "L2T",
0171             "PTB",
0172             "PSE",
0173             "TLB",
0174             "CLE",
0175             "TFU",
0176             "MMU",
0177             "GMP",
0178         };
0179         const char *client = "?";
0180 
0181         V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
0182 
0183         if (v3d->ver >= 41) {
0184             axi_id = axi_id >> 5;
0185             if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
0186                 client = v3d41_axi_ids[axi_id];
0187         }
0188 
0189         dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
0190             client, axi_id, (long long)vio_addr,
0191             ((intsts & V3D_HUB_INT_MMU_WRV) ?
0192              ", write violation" : ""),
0193             ((intsts & V3D_HUB_INT_MMU_PTI) ?
0194              ", pte invalid" : ""),
0195             ((intsts & V3D_HUB_INT_MMU_CAP) ?
0196              ", cap exceeded" : ""));
0197         status = IRQ_HANDLED;
0198     }
0199 
0200     return status;
0201 }
0202 
0203 int
0204 v3d_irq_init(struct v3d_dev *v3d)
0205 {
0206     int irq1, ret, core;
0207 
0208     INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
0209 
0210     /* Clear any pending interrupts someone might have left around
0211      * for us.
0212      */
0213     for (core = 0; core < v3d->cores; core++)
0214         V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
0215     V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
0216 
0217     irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
0218     if (irq1 == -EPROBE_DEFER)
0219         return irq1;
0220     if (irq1 > 0) {
0221         ret = devm_request_irq(v3d->drm.dev, irq1,
0222                        v3d_irq, IRQF_SHARED,
0223                        "v3d_core0", v3d);
0224         if (ret)
0225             goto fail;
0226         ret = devm_request_irq(v3d->drm.dev,
0227                        platform_get_irq(v3d_to_pdev(v3d), 0),
0228                        v3d_hub_irq, IRQF_SHARED,
0229                        "v3d_hub", v3d);
0230         if (ret)
0231             goto fail;
0232     } else {
0233         v3d->single_irq_line = true;
0234 
0235         ret = devm_request_irq(v3d->drm.dev,
0236                        platform_get_irq(v3d_to_pdev(v3d), 0),
0237                        v3d_irq, IRQF_SHARED,
0238                        "v3d", v3d);
0239         if (ret)
0240             goto fail;
0241     }
0242 
0243     v3d_irq_enable(v3d);
0244     return 0;
0245 
0246 fail:
0247     if (ret != -EPROBE_DEFER)
0248         dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
0249     return ret;
0250 }
0251 
0252 void
0253 v3d_irq_enable(struct v3d_dev *v3d)
0254 {
0255     int core;
0256 
0257     /* Enable our set of interrupts, masking out any others. */
0258     for (core = 0; core < v3d->cores; core++) {
0259         V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
0260         V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
0261     }
0262 
0263     V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
0264     V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
0265 }
0266 
0267 void
0268 v3d_irq_disable(struct v3d_dev *v3d)
0269 {
0270     int core;
0271 
0272     /* Disable all interrupts. */
0273     for (core = 0; core < v3d->cores; core++)
0274         V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
0275     V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);
0276 
0277     /* Clear any pending interrupts we might have left. */
0278     for (core = 0; core < v3d->cores; core++)
0279         V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
0280     V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
0281 
0282     cancel_work_sync(&v3d->overflow_mem_work);
0283 }
0284 
/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}