0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025 #include "kfd_device_queue_manager.h"
0026 #include "cik_regs.h"
0027 #include "oss/oss_2_4_sh_mask.h"
0028 #include "gca/gfx_7_2_sh_mask.h"
0029
0030 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
0031 struct qcm_process_device *qpd,
0032 enum cache_policy default_policy,
0033 enum cache_policy alternate_policy,
0034 void __user *alternate_aperture_base,
0035 uint64_t alternate_aperture_size);
0036 static int update_qpd_cik(struct device_queue_manager *dqm,
0037 struct qcm_process_device *qpd);
0038 static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
0039 struct qcm_process_device *qpd);
0040 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
0041 struct qcm_process_device *qpd);
0042 static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
0043 struct queue *q,
0044 struct qcm_process_device *qpd);
0045
0046 void device_queue_manager_init_cik(
0047 struct device_queue_manager_asic_ops *asic_ops)
0048 {
0049 asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
0050 asic_ops->update_qpd = update_qpd_cik;
0051 asic_ops->init_sdma_vm = init_sdma_vm;
0052 asic_ops->mqd_manager_init = mqd_manager_init_cik;
0053 }
0054
0055 void device_queue_manager_init_cik_hawaii(
0056 struct device_queue_manager_asic_ops *asic_ops)
0057 {
0058 asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
0059 asic_ops->update_qpd = update_qpd_cik_hawaii;
0060 asic_ops->init_sdma_vm = init_sdma_vm_hawaii;
0061 asic_ops->mqd_manager_init = mqd_manager_init_cik_hawaii;
0062 }
0063
/*
 * Build the SH_MEM_BASES register value for a process using 64-bit GPUVM
 * addressing, given the top address nybble of its apertures.
 */
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
	 * scratch and GPUVM apertures.
	 *
	 * The hardware fills in the remaining 59 bits according to the
	 * following pattern:
	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
	 *
	 * (where X/Y is the configurable nybble with the low-bit 0)
	 *
	 * LDS and scratch will have the same top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
	 * GPUVM can have a different top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
	 * We don't bother to support different top nybbles
	 * for LDS/Scratch and GPUVM.
	 */

	/* Low bit must be 0 and the value must be a valid nonzero nybble,
	 * per the aperture layout constraints above.
	 */
	WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
		top_address_nybble == 0);

	/* Place the nybble in bits 15:12 of both base fields (the macros
	 * position PRIVATE_BASE and SHARED_BASE within the register).
	 */
	return PRIVATE_BASE(top_address_nybble << 12) |
		SHARED_BASE(top_address_nybble << 12);
}
0090
0091 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
0092 struct qcm_process_device *qpd,
0093 enum cache_policy default_policy,
0094 enum cache_policy alternate_policy,
0095 void __user *alternate_aperture_base,
0096 uint64_t alternate_aperture_size)
0097 {
0098 uint32_t default_mtype;
0099 uint32_t ape1_mtype;
0100
0101 default_mtype = (default_policy == cache_policy_coherent) ?
0102 MTYPE_NONCACHED :
0103 MTYPE_CACHED;
0104
0105 ape1_mtype = (alternate_policy == cache_policy_coherent) ?
0106 MTYPE_NONCACHED :
0107 MTYPE_CACHED;
0108
0109 qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
0110 | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
0111 | DEFAULT_MTYPE(default_mtype)
0112 | APE1_MTYPE(ape1_mtype);
0113
0114 return true;
0115 }
0116
0117 static int update_qpd_cik(struct device_queue_manager *dqm,
0118 struct qcm_process_device *qpd)
0119 {
0120 struct kfd_process_device *pdd;
0121 unsigned int temp;
0122
0123 pdd = qpd_to_pdd(qpd);
0124
0125
0126 if (qpd->sh_mem_config == 0) {
0127 qpd->sh_mem_config =
0128 ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
0129 DEFAULT_MTYPE(MTYPE_NONCACHED) |
0130 APE1_MTYPE(MTYPE_NONCACHED);
0131 qpd->sh_mem_ape1_limit = 0;
0132 qpd->sh_mem_ape1_base = 0;
0133 }
0134
0135 if (qpd->pqm->process->is_32bit_user_mode) {
0136 temp = get_sh_mem_bases_32(pdd);
0137 qpd->sh_mem_bases = SHARED_BASE(temp);
0138 qpd->sh_mem_config |= PTR32;
0139 } else {
0140 temp = get_sh_mem_bases_nybble_64(pdd);
0141 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
0142 qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
0143 }
0144
0145 pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
0146 qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
0147
0148 return 0;
0149 }
0150
0151 static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
0152 struct qcm_process_device *qpd)
0153 {
0154 struct kfd_process_device *pdd;
0155 unsigned int temp;
0156
0157 pdd = qpd_to_pdd(qpd);
0158
0159
0160 if (qpd->sh_mem_config == 0) {
0161 qpd->sh_mem_config =
0162 ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
0163 DEFAULT_MTYPE(MTYPE_NONCACHED) |
0164 APE1_MTYPE(MTYPE_NONCACHED);
0165 qpd->sh_mem_ape1_limit = 0;
0166 qpd->sh_mem_ape1_base = 0;
0167 }
0168
0169
0170
0171
0172 temp = get_sh_mem_bases_nybble_64(pdd);
0173 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
0174
0175 pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
0176 qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
0177
0178 return 0;
0179 }
0180
0181 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
0182 struct qcm_process_device *qpd)
0183 {
0184 uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
0185
0186 if (q->process->is_32bit_user_mode)
0187 value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
0188 get_sh_mem_bases_32(qpd_to_pdd(qpd));
0189 else
0190 value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
0191 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
0192 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
0193
0194 q->properties.sdma_vm_addr = value;
0195 }
0196
0197 static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
0198 struct queue *q,
0199 struct qcm_process_device *qpd)
0200 {
0201
0202
0203
0204 q->properties.sdma_vm_addr =
0205 ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
0206 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
0207 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
0208 }