/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_RINGBUFFER_H__
#define __MSM_RINGBUFFER_H__

#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

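/*
 * rbmemptr() computes the GPU iova of a field in the ring's shared
 * msm_rbmemptrs block, and rbmemptr_stats() the iova of a field in one of
 * the per-submit stats slots, so the GPU can be pointed directly at them.
 * (A usage sketch follows struct msm_ringbuffer below.)
 */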
#define rbmemptr(ring, member)  \
    ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

#define rbmemptr_stats(ring, index, member) \
    (rbmemptr((ring), stats) + \
     ((index) * sizeof(struct msm_gpu_submit_stats)) + \
     offsetof(struct msm_gpu_submit_stats, member))

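/*
 * Counter snapshots captured around a submit and written by the GPU into
 * the stats slots below: CP cycle counter and always-on counter values at
 * the start and end of the submit.
 */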
struct msm_gpu_submit_stats {
    u64 cpcycles_start;
    u64 cpcycles_end;
    u64 alwayson_start;
    u64 alwayson_end;
};

#define MSM_GPU_SUBMIT_STATS_COUNT 64

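/*
 * Per-ring state shared between CPU and GPU: the ring read pointer, the
 * last completed fence seqno, the per-submit stats slots, and (where
 * per-process pagetables are in use) the most recently programmed ttbr0.
 * These are updated by the GPU and read by the CPU, hence the volatile
 * qualifiers.
 */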
struct msm_rbmemptrs {
    volatile uint32_t rptr;
    volatile uint32_t fence;

    volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
    volatile u64 ttbr0;
};

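/*
 * State for a single ringbuffer (typically one per priority level): the
 * GEM buffer backing the ring and the CPU pointers used to write into it,
 * the drm_gpu_scheduler instance that feeds it, and the memptrs block
 * shared with the GPU.
 */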
struct msm_ringbuffer {
    struct msm_gpu *gpu;
    int id;
    struct drm_gem_object *bo;
    uint32_t *start, *end, *cur, *next;

    /*
     * The job scheduler for this ring.
     */
    struct drm_gpu_scheduler sched;

    /*
     * List of in-flight submits on this ring.  Protected by submit_lock.
     *
     * Currently just submits that are already written into the ring, not
     * submits that are still in drm_gpu_scheduler's queues.  At a later
     * step we could probably move to letting drm_gpu_scheduler manage
     * hangcheck detection and keep track of submit jobs that are in-
     * flight.
     */
    struct list_head submits;
    spinlock_t submit_lock;

    uint64_t iova;
    uint32_t hangcheck_fence;
    struct msm_rbmemptrs *memptrs;
    uint64_t memptrs_iova;
    struct msm_fence_context *fctx;

    /*
     * preempt_lock protects preemption and serializes wptr updates against
     * preemption.  Can be acquired from irq context.
     */
    spinlock_t preempt_lock;
};

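/*
 * Usage sketch for the rbmemptr()/memptrs helpers above.  The two helpers
 * below are hypothetical illustrations only (not part of the driver); they
 * show how the GPU-visible iova of the fence field and the counter
 * snapshots of a completed submit would be looked up.
 */
static inline uint64_t example_fence_iova(struct msm_ringbuffer *ring)
{
    /* iova the GPU writes the completed fence seqno to */
    return rbmemptr(ring, fence);
}

static inline u64 example_submit_alwayson_ticks(struct msm_ringbuffer *ring,
        uint32_t seqno)
{
    /* stats slots are reused round-robin, indexed by submit seqno */
    const volatile struct msm_gpu_submit_stats *stats =
        &ring->memptrs->stats[seqno % MSM_GPU_SUBMIT_STATS_COUNT];

    return stats->alwayson_end - stats->alwayson_start;
}
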
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
        void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);

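/*
 * Rings are created by the GPU core at init time and torn down with
 * msm_ringbuffer_destroy().  Roughly, as a caller-side sketch (field names
 * assumed), each ring is handed its own slice of a single shared memptrs
 * buffer:
 *
 *    gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
 *    memptrs += sizeof(struct msm_rbmemptrs);
 *    memptrs_iova += sizeof(struct msm_rbmemptrs);
 */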
/* ringbuffer helpers (the parts that are the same for a3xx/a2xx/z180..) */

static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
    /*
     * ring->next points to the current command being written - it won't be
     * committed as ring->cur until the flush
     */
    if (ring->next == ring->end)
        ring->next = ring->start;
    *(ring->next++) = data;
}

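/*
 * Usage sketch (hypothetical helper, not part of this header): commands
 * are emitted one dword at a time with OUT_RING(); 64-bit values such as
 * GPU iovas are typically split into lower/upper halves.
 */
static inline void
example_out_ring64(struct msm_ringbuffer *ring, uint64_t data)
{
    OUT_RING(ring, lower_32_bits(data));
    OUT_RING(ring, upper_32_bits(data));
}
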
#endif /* __MSM_RINGBUFFER_H__ */