#include <linux/firmware.h>

#include "amdgpu.h"
#include "sid.h"
#include "ppsmc.h"
#include "amdgpu_ucode.h"
#include "sislands_smc.h"

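/*
 * Program the indirect index register for a single dword access to SMC SRAM.
 * The address must be dword aligned and within the caller-supplied limit;
 * auto-increment is disabled so each access targets exactly one dword.
 */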
static int si_set_smc_sram_address(struct amdgpu_device *adev,
				   u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(SMC_IND_INDEX_0, smc_address);
	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);

	return 0;
}

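/*
 * Copy a byte buffer into SMC SRAM starting at smc_start_address.
 * Whole dwords are written directly; any trailing bytes are merged with
 * the existing SRAM contents via a read-modify-write of the final dword.
 */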
int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
				u32 smc_start_address,
				const u8 *src, u32 byte_count, u32 limit)
{
	unsigned long flags;
	int ret = 0;
	u32 data, original_data, addr, extra_shift;

	if (smc_start_address & 3)
		return -EINVAL;
	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* SMC SRAM is big-endian: pack the bytes MSB first */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		WREG32(SMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* read-modify-write any trailing bytes into the final dword */
	if (byte_count > 0) {
		data = 0;

		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		original_data = RREG32(SMC_IND_DATA_0);
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* big-endian byte order, as above */
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;
		data |= (original_data & ~((~0UL) << extra_shift));

		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		WREG32(SMC_IND_DATA_0, data);
	}

done:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}

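/* Take the SMC out of reset so it starts executing its firmware. */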
void amdgpu_si_start_smc(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);

	tmp &= ~RST_REG;

	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}

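/*
 * Put the SMC back into reset. The repeated CB_CGTT_SCLK_CTRL reads serve
 * as a short delay before the reset bit is asserted.
 */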
void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
	u32 tmp;

	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);

	tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
	      RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}

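/*
 * Write the boot dword at SMC address 0 that redirects the SMC to its
 * firmware entry point when it is released from reset.
 */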
int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
{
	static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };

	return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
}

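/* Enable or gate the SMC clock via SMC_SYSCON_CLOCK_CNTL_0. */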
void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	if (enable)
		tmp &= ~CK_DISABLE;
	else
		tmp |= CK_DISABLE;

	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
}

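/* The SMC is considered running when it is out of reset and its clock is not gated. */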
bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
{
	u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	if (!(rst & RST_REG) && !(clk & CK_DISABLE))
		return true;

	return false;
}

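/*
 * Post a message to the SMC mailbox and poll SMC_RESP_0 for up to
 * adev->usec_timeout microseconds. The last response value is returned
 * even if the poll times out, so callers should check for PPSMC_Result_OK.
 */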
PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
				       PPSMC_Msg msg)
{
	u32 tmp;
	int i;

	if (!amdgpu_si_is_smc_running(adev))
		return PPSMC_Result_Failed;

	WREG32(SMC_MESSAGE_0, msg);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SMC_RESP_0);
		if (tmp != 0)
			break;
		udelay(1);
	}

	return (PPSMC_Result)RREG32(SMC_RESP_0);
}

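/* Poll until the SMC clock-enable bit (CKEN) clears or the timeout expires. */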
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_si_is_smc_running(adev))
		return PPSMC_Result_OK;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
		if ((tmp & CKEN) == 0)
			break;
		udelay(1);
	}

	return PPSMC_Result_OK;
}

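/*
 * Upload the SMC firmware image referenced by adev->pm.fw into SMC SRAM
 * using auto-incrementing indirect writes. The image size must be a
 * multiple of four bytes.
 */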
int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
{
	const struct smc_firmware_header_v1_0 *hdr;
	unsigned long flags;
	u32 ucode_start_address;
	u32 ucode_size;
	const u8 *src;
	u32 data;

	if (!adev->pm.fw)
		return -EINVAL;

	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;

	amdgpu_ucode_print_smc_hdr(&hdr->header);

	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
	src = (const u8 *)
		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	if (ucode_size & 3)
		return -EINVAL;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(SMC_IND_INDEX_0, ucode_start_address);
	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
	while (ucode_size >= 4) {
		/* SMC SRAM is big-endian: pack the bytes MSB first */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		WREG32(SMC_IND_DATA_0, data);

		src += 4;
		ucode_size -= 4;
	}
	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return 0;
}

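/* Read one dword from SMC SRAM at smc_address into *value. */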
int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				  u32 *value, u32 limit)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	ret = si_set_smc_sram_address(adev, smc_address, limit);
	if (ret == 0)
		*value = RREG32(SMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}

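/* Write one dword to SMC SRAM at smc_address. */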
int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				   u32 value, u32 limit)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	ret = si_set_smc_sram_address(adev, smc_address, limit);
	if (ret == 0)
		WREG32(SMC_IND_DATA_0, value);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}