0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
0003  */
0004 #ifndef __A5XX_GPU_H__
0005 #define __A5XX_GPU_H__
0006 
0007 #include "adreno_gpu.h"
0008 
0009 /* Bringing over the hack from the previous targets */
0010 #undef ROP_COPY
0011 #undef ROP_XOR
0012 
0013 #include "a5xx.xml.h"
0014 
/*
 * a5xx-specific GPU state, embedding the generic adreno_gpu base.
 * Recovered from a struct adreno_gpu pointer via to_a5xx_gpu().
 */
struct a5xx_gpu {
    struct adreno_gpu base;

    /* PM4 microcode buffer object and its GPU (iova) address */
    struct drm_gem_object *pm4_bo;
    uint64_t pm4_iova;

    /* PFP microcode buffer object and its GPU (iova) address */
    struct drm_gem_object *pfp_bo;
    uint64_t pfp_iova;

    /* GPMU firmware: buffer object, GPU address, and size in dwords */
    struct drm_gem_object *gpmu_bo;
    uint64_t gpmu_iova;
    uint32_t gpmu_dwords;

    /* Leakage value consumed by the power/limits code — see a5xx_power_init() */
    uint32_t lm_leakage;

    /* Ring currently executing and, presumably, the ring a pending
     * preemption will switch to — confirm against a5xx_preempt_trigger() */
    struct msm_ringbuffer *cur_ring;
    struct msm_ringbuffer *next_ring;

    /* Per-ring preemption records shared with the CP (struct
     * a5xx_preempt_record below), their backing BOs, counter storage
     * BOs, and the records' GPU addresses */
    struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
    struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
    struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
    uint64_t preempt_iova[MSM_GPU_MAX_RINGS];

    /* Current enum preempt_state value (read by a5xx_in_preempt()) and
     * the timer that detects a preemption that never completes */
    atomic_t preempt_state;
    struct timer_list preempt_timer;

    /* Shadow buffer of one uint32_t slot per ring (see shadowptr()):
     * BO, its GPU address, and the CPU-side mapping */
    struct drm_gem_object *shadow_bo;
    uint64_t shadow_iova;
    uint32_t *shadow;

    /* True if the microcode supports the WHERE_AM_I opcode */
    bool has_whereami;
};
0048 
0049 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
0050 
0051 #ifdef CONFIG_DEBUG_FS
0052 void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
0053 #endif
0054 
/*
 * In order to do lockless preemption we use a simple state machine to progress
 * through the process.
 *
 * PREEMPT_NONE - no preemption in progress.  Next state: START.
 * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
 * states: TRIGGERED, NONE
 * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
 * state: NONE.
 * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
 * states: FAULTED, PENDING
 * PREEMPT_FAULTED - A preemption timed out (never completed). This will trigger
 * recovery.  Next state: N/A
 * PREEMPT_PENDING - Preemption complete interrupt fired - the callback is
 * checking the success of the operation. Next states: FAULTED, NONE.
 */
0071 
enum preempt_state {
    PREEMPT_NONE = 0,   /* No preemption in progress */
    PREEMPT_START,      /* Trigger is evaluating whether to preempt */
    PREEMPT_ABORT,      /* Transient state on the way back to NONE */
    PREEMPT_TRIGGERED,  /* Preemption has been issued to the hardware */
    PREEMPT_FAULTED,    /* Preemption timed out; recovery will run */
    PREEMPT_PENDING,    /* Completion IRQ fired; verifying success */
};
0080 
0081 /*
0082  * struct a5xx_preempt_record is a shared buffer between the microcode and the
0083  * CPU to store the state for preemption. The record itself is much larger
0084  * (64k) but most of that is used by the CP for storage.
0085  *
0086  * There is a preemption record assigned per ringbuffer. When the CPU triggers a
0087  * preemption, it fills out the record with the useful information (wptr, ring
0088  * base, etc) and the microcode uses that information to set up the CP following
0089  * the preemption.  When a ring is switched out, the CP will save the ringbuffer
0090  * state back to the record. In this way, once the records are properly set up
0091  * the CPU can quickly switch back and forth between ringbuffers by only
0092  * updating a few registers (often only the wptr).
0093  *
0094  * These are the CPU aware registers in the record:
0095  * @magic: Must always be 0x27C4BAFC
0096  * @info: Type of the record - written 0 by the CPU, updated by the CP
0097  * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
0098  * the CP
0099  * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
0100  * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
0101  * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
0102  * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
0103  * @rbase: Value of RB_BASE written by CPU, save/restored by CP
0104  * @counter: GPU address of the storage area for the performance counters
0105  */
/*
 * Layout is shared with the CP microcode (see the comment above) —
 * do not reorder, resize, or insert fields.
 */
struct a5xx_preempt_record {
    uint32_t magic;      /* Must be A5XX_PREEMPT_RECORD_MAGIC */
    uint32_t info;       /* Record type: CPU writes 0, CP updates */
    uint32_t data;       /* SET_RENDER_MODE / checkpoint data, CP-owned */
    uint32_t cntl;       /* Saved RB_CNTL */
    uint32_t rptr;       /* Saved RB_RPTR */
    uint32_t wptr;       /* Saved RB_WPTR */
    uint64_t rptr_addr;  /* Saved RB_RPTR_ADDR */
    uint64_t rbase;      /* Saved RB_BASE */
    uint64_t counter;    /* GPU address of the performance counter storage */
};
0117 
0118 /* Magic identifier for the preemption record */
0119 #define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
0120 
0121 /*
0122  * Even though the structure above is only a few bytes, we need a full 64k to
0123  * store the entire preemption record from the CP
0124  */
0125 #define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
0126 
0127 /*
0128  * The preemption counter block is a storage area for the value of the
0129  * preemption counters that are saved immediately before context switch. We
0130  * append it on to the end of the allocation for the preemption record.
0131  */
0132 #define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
0133 
0134 
0135 int a5xx_power_init(struct msm_gpu *gpu);
0136 void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
0137 
/*
 * Busy-wait up to @usecs microseconds for (@reg & @mask) to read back
 * equal to @value.  Polls once per microsecond.
 *
 * Return: 0 as soon as the register matches, -ETIMEDOUT otherwise.
 */
static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
        uint32_t reg, uint32_t mask, uint32_t value)
{
    uint32_t elapsed;

    for (elapsed = 0; elapsed < usecs; elapsed++) {
        udelay(1);
        if ((gpu_read(gpu, reg) & mask) == value)
            return 0;
        cpu_relax();
    }

    return -ETIMEDOUT;
}
0150 
0151 #define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
0152         ((ring)->id * sizeof(uint32_t)))
0153 
0154 bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
0155 void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
0156 
0157 void a5xx_preempt_init(struct msm_gpu *gpu);
0158 void a5xx_preempt_hw_init(struct msm_gpu *gpu);
0159 void a5xx_preempt_trigger(struct msm_gpu *gpu);
0160 void a5xx_preempt_irq(struct msm_gpu *gpu);
0161 void a5xx_preempt_fini(struct msm_gpu *gpu);
0162 
0163 void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
0164 
0165 /* Return true if we are in a preempt state */
0166 static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
0167 {
0168     int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
0169 
0170     return !(preempt_state == PREEMPT_NONE ||
0171             preempt_state == PREEMPT_ABORT);
0172 }
0173 
0174 #endif /* __A5XX_GPU_H__ */