#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
#define KFD_DEVICE_QUEUE_MANAGER_H_

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"

#define VMID_NUM 16

#define KFD_MES_PROCESS_QUANTUM 100000
#define KFD_MES_GANG_QUANTUM 10000

struct device_process_node {
	struct qcm_process_device *qpd;
	struct list_head list;
};

union SQ_CMD_BITS {
	struct {
		uint32_t cmd:3;
		uint32_t:1;
		uint32_t mode:3;
		uint32_t check_vmid:1;
		uint32_t trap_id:3;
		uint32_t:5;
		uint32_t wave_id:4;
		uint32_t simd_id:2;
		uint32_t:2;
		uint32_t queue_id:3;
		uint32_t:1;
		uint32_t vm_id:4;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};

union GRBM_GFX_INDEX_BITS {
	struct {
		uint32_t instance_index:8;
		uint32_t sh_index:8;
		uint32_t se_index:8;
		uint32_t:5;
		uint32_t sh_broadcast_writes:1;
		uint32_t instance_broadcast_writes:1;
		uint32_t se_broadcast_writes:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};

/**
 * struct device_queue_manager_ops
 *
 * @create_queue: Queue creation routine.
 *
 * @destroy_queue: Queue destruction routine.
 *
 * @update_queue: Queue update routine.
 *
 * @register_process: Associates a specific process with the device.
 *
 * @unregister_process: Destroys the association between a process and the
 * device.
 *
 * @initialize: Initializes the pipelines and memory module for the device.
 *
 * @start: Initializes the resources/modules the device needs for queue
 * execution. Called on device initialization and after resume from
 * suspension.
 *
 * @stop: Stops execution of all the active queues running on the H/W.
 * Called on system suspend.
 *
 * @pre_reset: Prepares the queue manager for an impending GPU reset.
 *
 * @uninitialize: Destroys all the device queue manager resources allocated
 * in the initialize routine.
 *
 * @create_kernel_queue: Creates a kernel queue. Used for debug queue.
 *
 * @destroy_kernel_queue: Destroys a kernel queue. Used for debug queue.
 *
 * @set_cache_memory_policy: Sets memory policy (cached/non-cached) for the
 * memory apertures.
 *
 * @process_termination: Clears all process queues belonging to the device.
 *
 * @evict_process_queues: Evicts all active queues of a process.
 *
 * @restore_process_queues: Restores all evicted queues of a process.
 *
 * @get_wave_state: Retrieves context save state and optionally copies the
 * control stack, if kept in the MQD, to the given userspace address.
 *
 * @reset_queues: Resets queues which consumed RAS poison.
 *
 * @get_queue_checkpoint_info: Retrieves queue size information for CRIU
 * checkpoint.
 *
 * @checkpoint_mqd: Checkpoints queue MQD contents and control stack.
 */
struct device_queue_manager_ops {
	int (*create_queue)(struct device_queue_manager *dqm,
			struct queue *q,
			struct qcm_process_device *qpd,
			const struct kfd_criu_queue_priv_data *qd,
			const void *restore_mqd,
			const void *restore_ctl_stack);

	int (*destroy_queue)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q);

	int (*update_queue)(struct device_queue_manager *dqm,
			struct queue *q, struct mqd_update_info *minfo);

	int (*register_process)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);

	int (*unregister_process)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);

	int (*initialize)(struct device_queue_manager *dqm);
	int (*start)(struct device_queue_manager *dqm);
	int (*stop)(struct device_queue_manager *dqm);
	void (*pre_reset)(struct device_queue_manager *dqm);
	void (*uninitialize)(struct device_queue_manager *dqm);
	int (*create_kernel_queue)(struct device_queue_manager *dqm,
			struct kernel_queue *kq,
			struct qcm_process_device *qpd);

	void (*destroy_kernel_queue)(struct device_queue_manager *dqm,
			struct kernel_queue *kq,
			struct qcm_process_device *qpd);

	bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			enum cache_policy default_policy,
			enum cache_policy alternate_policy,
			void __user *alternate_aperture_base,
			uint64_t alternate_aperture_size);

	int (*process_termination)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);

	int (*evict_process_queues)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
	int (*restore_process_queues)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);

	int (*get_wave_state)(struct device_queue_manager *dqm,
			struct queue *q,
			void __user *ctl_stack,
			u32 *ctl_stack_used_size,
			u32 *save_area_used_size);

	int (*reset_queues)(struct device_queue_manager *dqm,
			uint16_t pasid);
	void (*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
			const struct queue *q, u32 *mqd_size,
			u32 *ctl_stack_size);

	int (*checkpoint_mqd)(struct device_queue_manager *dqm,
			const struct queue *q,
			void *mqd,
			void *ctl_stack);
};

struct device_queue_manager_asic_ops {
	int (*update_qpd)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);
	bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			enum cache_policy default_policy,
			enum cache_policy alternate_policy,
			void __user *alternate_aperture_base,
			uint64_t alternate_aperture_size);
	void (*init_sdma_vm)(struct device_queue_manager *dqm,
			struct queue *q,
			struct qcm_process_device *qpd);
	struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type,
			struct kfd_dev *dev);
};

/**
 * struct device_queue_manager
 *
 * This struct is a base class for the kfd queues scheduler in the device
 * level. The device base class should expose the basic operations for queue
 * creation and queue destruction. This base class hides the scheduling mode
 * of the driver and the specific implementation of the concrete device.
 * This class is the only class in the queues scheduler that configures the
 * H/W.
 */
struct device_queue_manager {
	struct device_queue_manager_ops ops;
	struct device_queue_manager_asic_ops asic_ops;

	struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
	struct packet_manager packet_mgr;
	struct kfd_dev *dev;
	struct mutex lock_hidden;
	struct list_head queues;
	unsigned int saved_flags;
	unsigned int processes_count;
	unsigned int active_queue_count;
	unsigned int active_cp_queue_count;
	unsigned int gws_queue_count;
	unsigned int total_queue_count;
	unsigned int next_pipe_to_allocate;
	unsigned int *allocated_queues;
	uint64_t sdma_bitmap;
	uint64_t xgmi_sdma_bitmap;
	/* the pasid mapping for each kfd vmid */
	uint16_t vmid_pasid[VMID_NUM];
	uint64_t pipelines_addr;
	uint64_t fence_gpu_addr;
	uint64_t *fence_addr;
	struct kfd_mem_obj *fence_mem;
	bool active_runlist;
	int sched_policy;

	/* hw exception */
	bool is_hws_hang;
	bool is_resetting;
	struct work_struct hw_exception_work;
	struct kfd_mem_obj hiq_sdma_mqd;
	bool sched_running;
};

void device_queue_manager_init_cik(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_cik_hawaii(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi_tonga(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v10_navi10(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v11(
		struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd);
unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);

static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
	return (pdd->lds_base >> 16) & 0xFF;
}

static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
	return (pdd->lds_base >> 60) & 0x0E;
}

/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void dqm_lock(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock_hidden);
	dqm->saved_flags = memalloc_noreclaim_save();
}

static inline void dqm_unlock(struct device_queue_manager *dqm)
{
	memalloc_noreclaim_restore(dqm->saved_flags);
	mutex_unlock(&dqm->lock_hidden);
}

static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val)
{
	/*
	 * SDMA activity counter is stored at queue's RPTR + 0x8 location.
	 */
	return get_user(*val, q_rptr + 1);
}
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */