Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
0002 /* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
0003 
0004 #ifndef __LIMA_DRM_H__
0005 #define __LIMA_DRM_H__
0006 
0007 #include "drm.h"
0008 
0009 #if defined(__cplusplus)
0010 extern "C" {
0011 #endif
0012 
/* GPU model identifiers reported via the DRM_LIMA_PARAM_GPU_ID parameter */
enum drm_lima_param_gpu_id {
    DRM_LIMA_PARAM_GPU_ID_UNKNOWN,
    DRM_LIMA_PARAM_GPU_ID_MALI400,
    DRM_LIMA_PARAM_GPU_ID_MALI450,
};
0018 
/* Parameter selectors for struct drm_lima_get_param::param */
enum drm_lima_param {
    DRM_LIMA_PARAM_GPU_ID,     /* GPU model, value from enum drm_lima_param_gpu_id */
    DRM_LIMA_PARAM_NUM_PP,     /* number of PP (pixel processor) cores */
    DRM_LIMA_PARAM_GP_VERSION, /* GP (geometry processor) hardware version */
    DRM_LIMA_PARAM_PP_VERSION, /* PP (pixel processor) hardware version */
};
0025 
/**
 * get various information of the GPU
 *
 * Used with the DRM_IOCTL_LIMA_GET_PARAM ioctl: the caller selects a
 * parameter via @param and the driver returns its value in @value.
 */
struct drm_lima_get_param {
    __u32 param; /* in, value in enum drm_lima_param */
    __u32 pad;   /* pad, must be zero */
    __u64 value; /* out, parameter value */
};
0034 
/*
 * A heap buffer dynamically increases its backing memory size when a GP
 * task fails due to lack of heap memory. The size field of a heap buffer
 * is an upper bound on the backing memory and can be set to a fairly
 * large value.
 */
0040 #define LIMA_BO_FLAG_HEAP  (1 << 0)
0041 
/**
 * create a buffer for use by the GPU
 *
 * Used with the DRM_IOCTL_LIMA_GEM_CREATE ioctl. With
 * LIMA_BO_FLAG_HEAP in @flags, @size is an upper bound rather than the
 * initial backing size (see the flag's description above).
 */
struct drm_lima_gem_create {
    __u32 size;    /* in, buffer size */
    __u32 flags;   /* in, buffer flags (LIMA_BO_FLAG_*) */
    __u32 handle;  /* out, GEM buffer handle */
    __u32 pad;     /* pad, must be zero */
};
0051 
/**
 * get information of a buffer
 *
 * Used with the DRM_IOCTL_LIMA_GEM_INFO ioctl: translates a GEM handle
 * into the buffer's GPU virtual address and its CPU mmap offset.
 */
struct drm_lima_gem_info {
    __u32 handle;  /* in, GEM buffer handle */
    __u32 va;      /* out, virtual address mapped into GPU MMU */
    __u64 offset;  /* out, used to mmap this buffer to CPU */
};
0060 
0061 #define LIMA_SUBMIT_BO_READ   0x01
0062 #define LIMA_SUBMIT_BO_WRITE  0x02
0063 
/*
 * buffer information used by one task
 *
 * One entry of the array pointed to by drm_lima_gem_submit::bos.
 */
struct drm_lima_gem_submit_bo {
    __u32 handle;  /* in, GEM buffer handle */
    __u32 flags;   /* in, buffer read/write by GPU (LIMA_SUBMIT_BO_*) */
};
0069 
0070 #define LIMA_GP_FRAME_REG_NUM 6
0071 
/* frame used to setup GP for each task (raw register values) */
struct drm_lima_gp_frame {
    __u32 frame[LIMA_GP_FRAME_REG_NUM];
};
0076 
0077 #define LIMA_PP_FRAME_REG_NUM 23
0078 #define LIMA_PP_WB_REG_NUM 12
0079 
/*
 * frame used to setup mali400 GPU PP for each task
 *
 * Mali-400 has no DLBU, so each of up to 4 PP cores gets its own PLBU
 * array address and fragment stack address.
 * NOTE(review): per-core limit of 4 inferred from the array sizes below —
 * confirm against hardware documentation.
 */
struct drm_lima_m400_pp_frame {
    __u32 frame[LIMA_PP_FRAME_REG_NUM];
    __u32 num_pp;                        /* number of PP cores to use */
    __u32 wb[3 * LIMA_PP_WB_REG_NUM];    /* 3 write-back units' registers */
    __u32 plbu_array_address[4];         /* per-PP PLBU array address */
    __u32 fragment_stack_address[4];     /* per-PP fragment stack address */
};
0088 
/*
 * frame used to setup mali450 GPU PP for each task
 *
 * With @use_dlbu set, work is distributed via the DLBU and @dlbu_regs is
 * used; otherwise each of up to 8 PP cores gets its own PLBU array
 * address from @plbu_array_address (the two share storage in the union).
 */
struct drm_lima_m450_pp_frame {
    __u32 frame[LIMA_PP_FRAME_REG_NUM];
    __u32 num_pp;                        /* number of PP cores to use */
    __u32 wb[3 * LIMA_PP_WB_REG_NUM];    /* 3 write-back units' registers */
    __u32 use_dlbu;                      /* nonzero: use DLBU instead of per-PP PLBU arrays */
    __u32 _pad;                          /* pad for 64-bit alignment of the union */
    union {
        __u32 plbu_array_address[8];     /* per-PP PLBU array address (use_dlbu == 0) */
        __u32 dlbu_regs[4];              /* DLBU register values (use_dlbu != 0) */
    };
    __u32 fragment_stack_address[8];     /* per-PP fragment stack address */
};
0102 
0103 #define LIMA_PIPE_GP  0x00
0104 #define LIMA_PIPE_PP  0x01
0105 
0106 #define LIMA_SUBMIT_FLAG_EXPLICIT_FENCE (1 << 0)
0107 
/**
 * submit a task to GPU
 *
 * Used with the DRM_IOCTL_LIMA_GEM_SUBMIT ioctl. @frame points to a
 * struct drm_lima_gp_frame, drm_lima_m400_pp_frame or
 * drm_lima_m450_pp_frame depending on @pipe and the GPU model, with
 * @frame_size set to match.
 *
 * User can always merge multi sync_file and drm_syncobj
 * into one drm_syncobj as in_sync[0], but we reserve
 * in_sync[1] for another task's out_sync to avoid the
 * export/import/merge pass when doing explicit sync.
 */
struct drm_lima_gem_submit {
    __u32 ctx;         /* in, context handle task is submitted to */
    __u32 pipe;        /* in, which pipe to use, LIMA_PIPE_GP/LIMA_PIPE_PP */
    __u32 nr_bos;      /* in, array length of bos field */
    __u32 frame_size;  /* in, size of frame field */
    __u64 bos;         /* in, array of drm_lima_gem_submit_bo */
    __u64 frame;       /* in, GP/PP frame */
    __u32 flags;       /* in, submit flags (LIMA_SUBMIT_FLAG_*) */
    __u32 out_sync;    /* in, drm_syncobj handle used to wait task finish after submission */
    __u32 in_sync[2];  /* in, drm_syncobj handle used to wait before start this task */
};
0127 
0128 #define LIMA_GEM_WAIT_READ   0x01
0129 #define LIMA_GEM_WAIT_WRITE  0x02
0130 
/**
 * wait pending GPU task finish of a buffer
 *
 * Used with the DRM_IOCTL_LIMA_GEM_WAIT ioctl.
 */
struct drm_lima_gem_wait {
    __u32 handle;      /* in, GEM buffer handle */
    __u32 op;          /* in, CPU want to read/write this buffer (LIMA_GEM_WAIT_*) */
    __s64 timeout_ns;  /* in, wait timeout in absolute time */
};
0139 
/**
 * create a context
 *
 * Used with the DRM_IOCTL_LIMA_CTX_CREATE ioctl; the returned @id is
 * passed in drm_lima_gem_submit::ctx.
 */
struct drm_lima_ctx_create {
    __u32 id;          /* out, context handle */
    __u32 _pad;        /* pad, must be zero */
};
0147 
/**
 * free a context
 *
 * Used with the DRM_IOCTL_LIMA_CTX_FREE ioctl.
 */
struct drm_lima_ctx_free {
    __u32 id;          /* in, context handle */
    __u32 _pad;        /* pad, must be zero */
};
0155 
0156 #define DRM_LIMA_GET_PARAM   0x00
0157 #define DRM_LIMA_GEM_CREATE  0x01
0158 #define DRM_LIMA_GEM_INFO    0x02
0159 #define DRM_LIMA_GEM_SUBMIT  0x03
0160 #define DRM_LIMA_GEM_WAIT    0x04
0161 #define DRM_LIMA_CTX_CREATE  0x05
0162 #define DRM_LIMA_CTX_FREE    0x06
0163 
0164 #define DRM_IOCTL_LIMA_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GET_PARAM, struct drm_lima_get_param)
0165 #define DRM_IOCTL_LIMA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_CREATE, struct drm_lima_gem_create)
0166 #define DRM_IOCTL_LIMA_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_INFO, struct drm_lima_gem_info)
0167 #define DRM_IOCTL_LIMA_GEM_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_SUBMIT, struct drm_lima_gem_submit)
0168 #define DRM_IOCTL_LIMA_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_WAIT, struct drm_lima_gem_wait)
0169 #define DRM_IOCTL_LIMA_CTX_CREATE DRM_IOR(DRM_COMMAND_BASE + DRM_LIMA_CTX_CREATE, struct drm_lima_ctx_create)
0170 #define DRM_IOCTL_LIMA_CTX_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_CTX_FREE, struct drm_lima_ctx_free)
0171 
0172 #if defined(__cplusplus)
0173 }
0174 #endif
0175 
0176 #endif /* __LIMA_DRM_H__ */