// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852a.h"
#include "rtw8852a_rfk.h"
#include "rtw8852a_rfk_table.h"
#include "rtw8852a_table.h"

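/* Map a PHY index to the RF path mask it calibrates: both paths when
 * DBCC is off, otherwise path A for PHY0 and path B for PHY1.
 */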
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
                    rtwdev->dbcc_en, phy_idx);

        if (!rtwdev->dbcc_en)
                return RF_AB;

        if (phy_idx == RTW89_PHY_0)
                return RF_A;
        else
                return RF_B;
}

static const u32 rtw8852a_backup_bb_regs[] = {0x2344, 0x58f0, 0x78f0};
static const u32 rtw8852a_backup_rf_regs[] = {0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852a_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852a_backup_rf_regs)

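/* Backup/restore helpers for the BB and RF registers that the RF
 * calibrations below modify; the value arrays must hold
 * BACKUP_BB_REGS_NR / BACKUP_RF_REGS_NR entries.
 */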
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
        u32 i;

        for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
                backup_bb_reg_val[i] =
                        rtw89_phy_read32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
                                              MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]backup bb reg : %x, value =%x\n",
                            rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
        }
}

static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
                               u8 rf_path)
{
        u32 i;

        for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
                backup_rf_reg_val[i] =
                        rtw89_read_rf(rtwdev, rf_path,
                                      rtw8852a_backup_rf_regs[i], RFREG_MASK);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
                            rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
        }
}

static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
                                u32 backup_bb_reg_val[])
{
        u32 i;

        for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
                                       MASKDWORD, backup_bb_reg_val[i]);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]restore bb reg : %x, value =%x\n",
                            rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
        }
}

static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
                                u32 backup_rf_reg_val[], u8 rf_path)
{
        u32 i;

        for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
                rtw89_write_rf(rtwdev, rf_path, rtw8852a_backup_rf_regs[i],
                               RFREG_MASK, backup_rf_reg_val[i]);

                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
                            rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
        }
}

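/* Poll each path in @kpath until the RF mode field leaves value 2
 * (presumably the TX state, given the function name) or 5 ms elapse.
 */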
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
        u8 path;
        u32 rf_mode;
        int ret;

        for (path = 0; path < RF_PATH_MAX; path++) {
                if (!(kpath & BIT(path)))
                        continue;

                ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
                                               2, 5000, false, rtwdev, path, 0x00,
                                               RR_MOD_MASK);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
                            path, ret);
        }
}

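/* Log the DACK results kept in rtwdev->dack for both paths: ADC DC
 * (ADDCK), DAC DC (DADCK), bias K and the per-entry MSBK tables.
 */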
static void _dack_dump(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;
        u8 t;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->addck_d[0][0], dack->addck_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->addck_d[1][0], dack->addck_d[1][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->dadck_d[0][0], dack->dadck_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
                    dack->dadck_d[1][0], dack->dadck_d[1][1]);

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
                    dack->biask_d[0][0], dack->biask_d[0][1]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
                    dack->biask_d[1][0], dack->biask_d[1][1]);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[0][0][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[0][1][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[1][0][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                t = dack->msbk_d[1][1][i];
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
        }
}

static void _afe_init(struct rtw89_dev *rtwdev)
{
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_afe_init_defs_tbl);
}

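/* Latch the ADDCK results from the report registers into the dack
 * state so they can be reloaded later without rerunning the
 * calibration.
 */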
static void _addck_backup(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;

        rtw89_phy_write32_clr(rtwdev, R_S0_RXDC2, B_S0_RXDC2_SEL);
        dack->addck_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
                                                         B_S0_ADDCK_Q);
        dack->addck_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
                                                         B_S0_ADDCK_I);

        rtw89_phy_write32_clr(rtwdev, R_S1_RXDC2, B_S1_RXDC2_SEL);
        dack->addck_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
                                                         B_S1_ADDCK_Q);
        dack->addck_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
                                                         B_S1_ADDCK_I);
}

static void _addck_reload(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;

        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_I, dack->addck_d[0][0]);
        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2, B_S0_RXDC2_Q2,
                               (dack->addck_d[0][1] >> 6));
        rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_Q,
                               (dack->addck_d[0][1] & 0x3f));
        rtw89_phy_write32_set(rtwdev, R_S0_RXDC2, B_S0_RXDC2_MEN);
        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_I, dack->addck_d[1][0]);
        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC2, B_S1_RXDC2_Q2,
                               (dack->addck_d[1][1] >> 6));
        rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_Q,
                               (dack->addck_d[1][1] & 0x3f));
        rtw89_phy_write32_set(rtwdev, R_S1_RXDC2, B_S1_RXDC2_EN);
}

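/* Read back the S0 DACK results: each MSBK entry is selected through
 * the AR field, then bias K and the DAC DC offsets (recentred by
 * subtracting 8) are captured.
 */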
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;

        rtw89_phy_write32_set(rtwdev, R_S0_DACKI, B_S0_DACKI_EN);
        rtw89_phy_write32_set(rtwdev, R_S0_DACKQ, B_S0_DACKQ_EN);
        rtw89_phy_write32_set(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);

        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, R_S0_DACKI, B_S0_DACKI_AR, i);
                dack->msbk_d[0][0][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI7, B_S0_DACKI7_K);
                rtw89_phy_write32_mask(rtwdev, R_S0_DACKQ, B_S0_DACKQ_AR, i);
                dack->msbk_d[0][1][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ7, B_S0_DACKQ7_K);
        }
        dack->biask_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI2,
                                                         B_S0_DACKI2_K);
        dack->biask_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ2,
                                                         B_S0_DACKQ2_K);
        dack->dadck_d[0][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI8,
                                                        B_S0_DACKI8_K) - 8;
        dack->dadck_d[0][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ8,
                                                        B_S0_DACKQ8_K) - 8;
}

static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u8 i;

        rtw89_phy_write32_set(rtwdev, R_S1_DACKI, B_S1_DACKI_EN);
        rtw89_phy_write32_set(rtwdev, R_S1_DACKQ, B_S1_DACKQ_EN);
        rtw89_phy_write32_set(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);

        for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
                rtw89_phy_write32_mask(rtwdev, R_S1_DACKI, B_S1_DACKI_AR, i);
                dack->msbk_d[1][0][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI7, B_S1_DACKI_K);
                rtw89_phy_write32_mask(rtwdev, R_S1_DACKQ, B_S1_DACKQ_AR, i);
                dack->msbk_d[1][1][i] =
                        (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ7, B_S1_DACKQ7_K);
        }
        dack->biask_d[1][0] =
                (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI2, B_S1_DACKI2_K);
        dack->biask_d[1][1] =
                (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ2, B_S1_DACKQ2_K);
        dack->dadck_d[1][0] =
                (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI8, B_S1_DACKI8_K) - 8;
        dack->dadck_d[1][1] =
                (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ8, B_S1_DACKQ8_K) - 8;
}

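/* Write one path/index set of DACK results back to hardware: the MSBK
 * bytes are packed four per 32-bit word into 0x5e14/0x5e18/0x5e1c/
 * 0x5e20 (offset by 0x50 for the second index and 0x2000 for path B),
 * and bias K/DAC DC go into 0x5e24.
 */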
static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
                                 enum rtw89_rf_path path, u8 index)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 tmp = 0, tmp_offset, tmp_reg;
        u8 i;
        u32 idx_offset, path_offset;

        if (index == 0)
                idx_offset = 0;
        else
                idx_offset = 0x50;

        if (path == RF_PATH_A)
                path_offset = 0;
        else
                path_offset = 0x2000;

        tmp_offset = idx_offset + path_offset;

        /* msbk_d: 15/14/13/12 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
        tmp_reg = 0x5e14 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));

        /* msbk_d: 11/10/9/8 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
        tmp_reg = 0x5e18 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));

        /* msbk_d: 7/6/5/4 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
        tmp_reg = 0x5e1c + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));

        /* msbk_d: 3/2/1/0 */
        tmp = 0x0;
        for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
                tmp |= dack->msbk_d[path][index][i] << (i * 8);
        tmp_reg = 0x5e20 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
                    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));

        /* bias k and DAC DC */
        tmp = 0x0;
        tmp = (dack->biask_d[path][index] << 22) |
              (dack->dadck_d[path][index] << 14);
        tmp_reg = 0x5e24 + tmp_offset;
        rtw89_phy_write32(rtwdev, tmp_reg, tmp);
}

static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        u8 i;

        for (i = 0; i < 2; i++)
                _dack_reload_by_path(rtwdev, path, i);

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_dack_reload_defs_a_tbl,
                                 &rtw8852a_rfk_dack_reload_defs_b_tbl);
}

#define ADDC_T_AVG 100
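/* Average ADDC_T_AVG samples of the RX DC report and log the result;
 * the report packs a signed 12-bit real part in bits 23:12 and the
 * imaginary part in bits 11:0.
 */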
static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        s32 dc_re = 0, dc_im = 0;
        u32 tmp;
        u32 i;

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_addc_defs_a_tbl,
                                 &rtw8852a_rfk_check_addc_defs_b_tbl);

        for (i = 0; i < ADDC_T_AVG; i++) {
                tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
                dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
                dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
        }

        dc_re /= ADDC_T_AVG;
        dc_im /= ADDC_T_AVG;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}

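/* ADC DC offset calibration: for each path, reset, measure the
 * residual DC, fire the one-shot calibration, poll the done bit
 * (0x1e00/0x3e00 bit 0), re-measure, then restore.
 */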
static void _addck(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        /* S0 */
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_a_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_A);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x1e00, BIT(0));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
                dack->addck_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_A);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_a_tbl);

        /* S1 */
        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_b_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_B);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x3e00, BIT(0));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
                dack->addck_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
        _check_addc(rtwdev, RF_PATH_B);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_b_tbl);
}

static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_dadc_defs_f_a_tbl,
                                 &rtw8852a_rfk_check_dadc_defs_f_b_tbl);

        _check_addc(rtwdev, path);

        rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
                                 &rtw8852a_rfk_check_dadc_defs_r_a_tbl,
                                 &rtw8852a_rfk_check_dadc_defs_r_b_tbl);
}

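/* DAC DC calibration for S0: run the MSBK stage and poll its done
 * bits, then the DADCK stage, check the residual DC, and back up and
 * reload the results.
 */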
static void _dack_s0(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x5e28, BIT(15));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x5e78, BIT(15));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
                dack->msbk_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_a_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x5e48, BIT(17));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x5e98, BIT(17));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADACK timeout\n");
                dack->dadck_timeout[0] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_a_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
        _check_dadc(rtwdev, RF_PATH_A);

        _dack_backup_s0(rtwdev);
        _dack_reload(rtwdev, RF_PATH_A);

        rtw89_phy_write32_clr(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);
}

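/* As _dack_s0(), for path S1. */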
static void _dack_s1(struct rtw89_dev *rtwdev)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 val;
        int ret;

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x7e28, BIT(15));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x7e78, BIT(15));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
                dack->msbk_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_b_tbl);

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                       false, rtwdev, 0x7e48, BIT(17));
        ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
                                        false, rtwdev, 0x7e98, BIT(17));
        if (ret) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
                dack->dadck_timeout[1] = true;
        }
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

        rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_b_tbl);

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
        _check_dadc(rtwdev, RF_PATH_B);

        _dack_backup_s1(rtwdev);
        _dack_reload(rtwdev, RF_PATH_B);

        rtw89_phy_write32_clr(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);
}

static void _dack(struct rtw89_dev *rtwdev)
{
        _dack_s0(rtwdev);
        _dack_s1(rtwdev);
}

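/* Full DAC calibration flow: save the RF mode, put both paths into the
 * calibration modes (0x30001 for ADDCK, 0x40001 for DACK), run both
 * stages under BTC one-shot notifications, then restore. The @force
 * argument is currently unused.
 */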
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
        struct rtw89_dack_info *dack = &rtwdev->dack;
        u32 rf0_0, rf1_0;
        u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);

        dack->dack_done = false;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
        rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
        rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
        _afe_init(rtwdev);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x30001);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x30001);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
        _addck(rtwdev);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
        _addck_backup(rtwdev);
        _addck_reload(rtwdev);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x40001);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
        _dack(rtwdev);
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
        _dack_dump(rtwdev);
        dack->dack_done = true;
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
        rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
        rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
        dack->dack_cnt++;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}

#define RTW8852A_NCTL_VER 0xd
#define RTW8852A_IQK_VER 0x2a
#define RTW8852A_IQK_SS 2
#define RTW8852A_IQK_THR_REK 8
#define RTW8852A_IQK_CFIR_GROUP_NR 4

enum rtw8852a_iqk_type {
        ID_TXAGC,
        ID_FLOK_COARSE,
        ID_FLOK_FINE,
        ID_TXK,
        ID_RXAGC,
        ID_RXK,
        ID_NBTXK,
        ID_NBRXK,
};

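/* Debug helper: step the KIP report selector through six FFT windows
 * and log the raw report words for the given path.
 */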
static void _iqk_read_fft_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
        u8 i = 0x0;
        u32 fft[6] = {0x0};

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00160000);
        fft[0] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00170000);
        fft[1] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00180000);
        fft[2] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00190000);
        fft[3] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001a0000);
        fft[4] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001b0000);
        fft[5] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
        for (i = 0; i < 6; i++)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x,fft[%x]= %x\n",
                            path, i, fft[i]);
}

static void _iqk_read_xym_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
        u8 i = 0x0;
        u32 tmp = 0x0;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX, 0x1);

        for (i = 0x0; i < 0x18; i++) {
                rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x000000c0 + i);
                rtw89_phy_write32_clr(rtwdev, R_NCTL_N2, MASKDWORD);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = %x\n",
                            path, BIT(path), tmp);
                udelay(1);
        }
        rtw89_phy_write32_clr(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX);
        rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
        rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x80010100);
        udelay(1);
}

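/* Debug helper: dump the 13 TX CFIR coefficient words of one
 * path/group from the table below, plus the per-group status words
 * and the KIP report.
 */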
static void _iqk_read_txcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
                                   u8 group)
{
        static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
                {0x8f20, 0x8f54, 0x8f88, 0x8fbc},
                {0x9320, 0x9354, 0x9388, 0x93bc},
        };
        u8 idx = 0x0;
        u32 tmp = 0x0;
        u32 base_addr;

        if (path >= RTW8852A_IQK_SS) {
                rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
                return;
        }
        if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
                rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
                return;
        }

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

        base_addr = base_addrs[path][group];

        for (idx = 0; idx < 0x0d; idx++) {
                tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK] %x = %x\n",
                            base_addr + (idx << 2), tmp);
        }

        if (path == 0x0) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f50 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f84 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fb8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fec = %x\n", tmp);
        } else {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9350 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9384 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93b8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93ec = %x\n", tmp);
        }
        rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xc);
        udelay(1);
        tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
                    BIT(path), tmp);
}

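/* As the TX variant above, but for the 16-word RX CFIR tables. */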
static void _iqk_read_rxcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
                                   u8 group)
{
        static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
                {0x8d00, 0x8d44, 0x8d88, 0x8dcc},
                {0x9100, 0x9144, 0x9188, 0x91cc},
        };
        u8 idx = 0x0;
        u32 tmp = 0x0;
        u32 base_addr;

        if (path >= RTW8852A_IQK_SS) {
                rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
                return;
        }
        if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
                rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
                return;
        }

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

        base_addr = base_addrs[path][group];
        for (idx = 0; idx < 0x10; idx++) {
                tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK,
                            "[IQK]%x = %x\n",
                            base_addr + (idx << 2), tmp);
        }

        if (path == 0x0) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d40 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d84 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8dc8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8e0c = %x\n", tmp);
        } else {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C0, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9140 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C1, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9184 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C2, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x91c8 = %x\n", tmp);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C3, MASKDWORD);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x920c = %x\n", tmp);
        }
        rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xd);
        tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
                    BIT(path), tmp);
}

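/* Debug helper: walk the 0xa0 IQK SRAM entries and log the captured
 * DC I and Q samples.
 */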
static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
{
        u32 tmp = 0x0;
        u32 i = 0x0;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
        rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
        rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);

        for (i = 0; i <= 0x9f; i++) {
                rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
        }

        for (i = 0; i <= 0x9f; i++) {
                rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
                tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
        }
        rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX2, MASKDWORD);
        rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX, MASKDWORD);
}

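/* Prepare a path for RX IQK: cycle the analog frontend and ADC FIFO,
 * select the band-specific RXK mode, mirror the synthesizer channel
 * word into RR_RSV4 and power-cycle the RXK PLL.
 */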
static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 tmp = 0x0;

        rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x3);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x3);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x0);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x5);
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
                break;
        default:
                break;
        }
        tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
        rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
        rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
        fsleep(128);
}

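/* Wait for the NCTL one-shot done flag (0x55 in the low byte of
 * 0xbff8) and log the report register. Note that this always returns
 * false; callers latch pass/fail from the result registers instead.
 */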
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
{
        u32 tmp;
        u32 val;
        int ret;

        ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1, 8200,
                                       false, rtwdev, 0xbff8, MASKBYTE0);
        if (ret)
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
        tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

        return false;
}

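/* Issue one NCTL IQK command: bits 11:8 of the command word select the
 * calibration type (offset by the bandwidth for TXK/RXK) and bit
 * (4 + path) selects the path. The command is bracketed by BTC
 * notifications, and the optional XYM/FFT/SRAM/CFIR debug dumps run
 * afterwards when enabled in iqk_info.
 */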
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
                          enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail = false;
        u32 iqk_cmd = 0x0;
        u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path);
        u32 addr_rfc_ctl = 0x0;

        if (path == RF_PATH_A)
                addr_rfc_ctl = 0x5864;
        else
                addr_rfc_ctl = 0x7864;

        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
        switch (ktype) {
        case ID_TXAGC:
                iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
                break;
        case ID_FLOK_COARSE:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
                iqk_cmd = 0x108 | (1 << (4 + path));
                break;
        case ID_FLOK_FINE:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
                iqk_cmd = 0x208 | (1 << (4 + path));
                break;
        case ID_TXK:
                rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
                iqk_cmd = 0x008 | (1 << (path + 4)) |
                          (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
                break;
        case ID_RXAGC:
                iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
                break;
        case ID_RXK:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
                iqk_cmd = 0x008 | (1 << (path + 4)) |
                          (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
                break;
        case ID_NBTXK:
                rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
                iqk_cmd = 0x308 | (1 << (4 + path));
                break;
        case ID_NBRXK:
                rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
                iqk_cmd = 0x608 | (1 << (4 + path));
                break;
        default:
                return false;
        }

        rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
        rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
        udelay(1);
        fail = _iqk_check_cal(rtwdev, path, ktype);
        if (iqk_info->iqk_xym_en)
                _iqk_read_xym_dbcc0(rtwdev, path);
        if (iqk_info->iqk_fft_en)
                _iqk_read_fft_dbcc0(rtwdev, path);
        if (iqk_info->iqk_sram_en)
                _iqk_sram(rtwdev, path);
        if (iqk_info->iqk_cfir_en) {
                if (ktype == ID_TXK) {
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x0);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x1);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x2);
                        _iqk_read_txcfir_dbcc0(rtwdev, path, 0x3);
                } else {
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x0);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x1);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x2);
                        _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x3);
                }
        }

        rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);

        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);

        return fail;
}

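/* Wideband RX IQK: sweep the four RX gain/attenuation groups for the
 * current band, run a one-shot RXK per group and record per-group
 * fail bits in R_IQKINF, then select the wideband RX CFIR result.
 */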
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
                           enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
        static const u32 attc2_a[4] = {0x0, 0x0, 0x07, 0x30};
        static const u32 attc1_a[4] = {0x7, 0x5, 0x1, 0x1};
        static const u32 rxgn_g[4] = {0x1CC, 0x1E0, 0x2CC, 0x2E0};
        static const u32 attc2_g[4] = {0x0, 0x15, 0x3, 0x1a};
        static const u32 attc1_g[4] = {0x1, 0x0, 0x1, 0x0};
        u8 gp = 0x0;
        bool fail = false;
        u32 rf0 = 0x0;

        for (gp = 0; gp < 0x4; gp++) {
                switch (iqk_info->iqk_band[path]) {
                case RTW89_BAND_2G:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_g[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, attc2_g[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, attc1_g[gp]);
                        break;
                case RTW89_BAND_5G:
                        rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_a[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, attc2_a[gp]);
                        rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, attc1_a[gp]);
                        break;
                default:
                        break;
                }
                rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
                rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
                rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
                                       rf0 | iqk_info->syn1to2);
                rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
                rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
                rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
                rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
                rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
                rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
                fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
                rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
        }

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
                break;
        default:
                break;
        }
        iqk_info->nb_rxcfir[path] = 0x40000000;
        rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
                               B_IQK_RES_RXCFIR, 0x5);
        iqk_info->is_wb_rxiqk[path] = true;
        return false;
}

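/* Narrow-band RX IQK with a single fixed gain group; on success the
 * RX IQC readback (tagged with bit 1) becomes the narrow-band CFIR,
 * otherwise the neutral default 0x40000002 is kept.
 */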
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
                       enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u8 group = 0x0;
        u32 rf0 = 0x0, tmp = 0x0;
        u32 idxrxgain_a = 0x1a0;
        u32 idxattc2_a = 0x00;
        u32 idxattc1_a = 0x5;
        u32 idxrxgain_g = 0x1E0;
        u32 idxattc2_g = 0x15;
        u32 idxattc1_g = 0x0;
        bool fail = false;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_g);
                rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, idxattc2_g);
                rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, idxattc1_g);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_a);
                rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, idxattc2_a);
                rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, idxattc1_a);
                break;
        default:
                break;
        }
        rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
        rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
                               rf0 | iqk_info->syn1to2);
        rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
        rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
        rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
        rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                               B_CFIR_LUT_GP, group);
        rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
        fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
                rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
                rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
                break;
        default:
                break;
        }
        if (!fail) {
                tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
                iqk_info->nb_rxcfir[path] = tmp | 0x2;
        } else {
                iqk_info->nb_rxcfir[path] = 0x40000002;
        }
        return fail;
}

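/* Select the IQK RX clock: one ADC/clock divider setting for 80 MHz
 * channels and another for narrower bandwidths.
 */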
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
                rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
                                       MASKDWORD, 0x4d000a08);
                rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
                                       B_P0_RXCK_VAL, 0x2);
                rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
                rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
                rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
        } else {
                rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
                                       MASKDWORD, 0x44000a08);
                rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
                                       B_P0_RXCK_VAL, 0x1);
                rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
                rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
                rtw89_phy_write32_clr(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL);
        }
}

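/* Wideband TX IQK: sweep the four band-specific TX gain groups, run a
 * one-shot TXK per group and record per-group fail bits, then select
 * the wideband TX CFIR result.
 */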
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
                           enum rtw89_phy_idx phy_idx, u8 path)
{
        static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
        static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
        static const u32 a_itqt[4] = {0x12, 0x12, 0x12, 0x1b};
        static const u32 g_itqt[4] = {0x09, 0x12, 0x12, 0x12};
        static const u32 g_attsmxr[4] = {0x0, 0x1, 0x1, 0x1};
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool fail = false;
        u8 gp = 0x0;
        u32 tmp = 0x0;

        for (gp = 0x0; gp < 0x4; gp++) {
                switch (iqk_info->iqk_band[path]) {
                case RTW89_BAND_2G:
                        rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
                                               B_RFGAIN_BND, 0x08);
                        rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
                                       g_txgain[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1,
                                       g_attsmxr[gp]);
                        rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0,
                                       g_attsmxr[gp]);
                        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                               MASKDWORD, g_itqt[gp]);
                        break;
                case RTW89_BAND_5G:
                        rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
                                               B_RFGAIN_BND, 0x04);
                        rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
                                       a_txgain[gp]);
                        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
                                               MASKDWORD, a_itqt[gp]);
                        break;
                default:
                        break;
                }
                rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
                rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
                rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
                rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
                                       B_CFIR_LUT_GP, gp);
                rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
                fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
                rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
        }

        iqk_info->nb_txcfir[path] = 0x40000000;
        rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
                               B_IQK_RES_TXCFIR, 0x5);
        iqk_info->is_wb_txiqk[path] = true;
        tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
                    BIT(path), tmp);
        return false;
}

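/* Narrow-band TX IQK with one fixed gain group; mirrors _iqk_nbrxk(). */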
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
                       enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u8 group = 0x2;
        u32 a_mode_txgain = 0x64e2;
        u32 g_mode_txgain = 0x61e8;
        u32 attsmxr = 0x1;
        u32 itqt = 0x12;
        u32 tmp = 0x0;
        bool fail = false;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
                                       B_RFGAIN_BND, 0x08);
                rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, g_mode_txgain);
                rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, attsmxr);
                rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, attsmxr);
                break;
        case RTW89_BAND_5G:
                rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
                                       B_RFGAIN_BND, 0x04);
                rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, a_mode_txgain);
                break;
        default:
                break;
        }
        rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
        rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
        rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
        fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
        if (!fail) {
                tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
                iqk_info->nb_txcfir[path] = tmp | 0x2;
        } else {
                iqk_info->nb_txcfir[path] = 0x40000002;
        }
        tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
                    BIT(path), tmp);
        return fail;
}

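/* Program the LOK bias value into the band-specific RF LUT entry via
 * the LUT write-enable window.
 */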
static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
        rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
        if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
                rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
        else
                rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
        rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
        rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
}

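/* Read back the TX mixer core I/Q codes after LOK; codes at the edges
 * of the range (< 0x2 or > 0x1d) are treated as a failed calibration.
 */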
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
        bool is_fail = false;
        u32 tmp = 0x0;
        u32 core_i = 0x0;
        u32 core_q = 0x0;

        tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK][FineLOK] S%x, 0x58 = 0x%x\n",
                    path, tmp);
        core_i = FIELD_GET(RR_TXMO_COI, tmp);
        core_q = FIELD_GET(RR_TXMO_COQ, tmp);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, i = 0x%x\n", path, core_i);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, q = 0x%x\n", path, core_q);

        if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
                is_fail = true;
        return is_fail;
}

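/* LO leakage calibration: run the coarse and fine FLOK one-shots with
 * band-specific TX gain and ITQT settings, then validate the
 * resulting core codes.
 */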
static bool _iqk_lok(struct rtw89_dev *rtwdev,
                     enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 rf0 = 0x0;
        u8 itqt = 0x12;
        bool fail = false;
        bool tmp = false;

        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe5e0);
                itqt = 0x09;
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe4e0);
                itqt = 0x12;
                break;
        default:
                break;
        }
        rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
        rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
        rtw89_phy_write32_mask(rtwdev, R_IQK_DIF1, B_IQK_DIF1_TXPI,
                               rf0 | iqk_info->syn1to2);
        rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, 0x1);
        rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, 0x0);
        rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
        rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
        tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
        iqk_info->lok_cor_fail[0][path] = tmp;
        fsleep(10);
        rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
        tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
        iqk_info->lok_fin_fail[0][path] = tmp;
        fail = _lok_finetune_check(rtwdev, path);
        return fail;
}

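/* Configure the analog frontend and band-specific RF state for TX IQK. */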
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

        rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
        udelay(1);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
        rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);
        switch (iqk_info->iqk_band[path]) {
        case RTW89_BAND_2G:
                rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW, 0x00);
                rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
                rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
                rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x1);
                rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
                rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x000);
                rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
                rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
                rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
                               0x403e0 | iqk_info->syn1to2);
                udelay(1);
                break;
        case RTW89_BAND_5G:
                rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
                rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
                rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x7);
                rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
                rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x100);
                rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
                rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
                rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x1);
                rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x0);
                rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
                               0x403e0 | iqk_info->syn1to2);
                udelay(1);
                break;
        default:
                break;
        }
}

static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
        rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
}

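/* Record the per-path IQK outcome: thermal at calibration time, the
 * LOK/TXK/RXK fail flags (also mirrored into R_IQKINF), CFIR
 * snapshots, and the running fail counter.
 */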
static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
                          u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        u32 tmp = 0x0;
        bool flag = 0x0;

        iqk_info->thermal[path] =
                ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
        iqk_info->thermal_rek_en = false;
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path,
                    iqk_info->thermal[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
                    iqk_info->lok_cor_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
                    iqk_info->lok_fin_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
                    iqk_info->iqk_tx_fail[0][path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
                    iqk_info->iqk_rx_fail[0][path]);
        flag = iqk_info->lok_cor_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(0) << (path * 4), flag);
        flag = iqk_info->lok_fin_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(1) << (path * 4), flag);
        flag = iqk_info->iqk_tx_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(2) << (path * 4), flag);
        flag = iqk_info->iqk_rx_fail[0][path];
        rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(3) << (path * 4), flag);

        tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
        iqk_info->bp_iqkenable[path] = tmp;
        tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
        iqk_info->bp_txkresult[path] = tmp;
        tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
        iqk_info->bp_rxkresult[path] = tmp;

        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
                               (u8)iqk_info->iqk_times);

        tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, 0x0000000f << (path * 4));
        if (tmp != 0x0)
                iqk_info->iqk_fail_cnt++;
        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x00ff0000 << (path * 4),
                               iqk_info->iqk_fail_cnt);
}

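/* Per-path IQK sequence: retry LOK up to three times with increasing
 * bias, run TX IQK, then set up the RX clock/RF path and run RX IQK.
 * The narrow-band variants are used when configured, and RX IQK also
 * falls back to narrow-band under DBCC or on 2 GHz.
 */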
static
void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        bool lok_is_fail = false;
        u8 ibias = 0x1;
        u8 i = 0;

        _iqk_txclk_setting(rtwdev, path);

        for (i = 0; i < 3; i++) {
                _lok_res_table(rtwdev, path, ibias++);
                _iqk_txk_setting(rtwdev, path);
                lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
                if (!lok_is_fail)
                        break;
        }
        if (iqk_info->is_nbiqk)
                iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
        else
                iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

        _iqk_rxclk_setting(rtwdev, path);
        _iqk_rxk_setting(rtwdev, path);
        if (iqk_info->is_nbiqk || rtwdev->dbcc_en ||
            iqk_info->iqk_band[path] == RTW89_BAND_2G)
                iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
        else
                iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

        _iqk_info_iqk(rtwdev, phy_idx, path);
}

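/* Capture the current band/bandwidth/channel into the IQK state, pick
 * a free (or recycled) MCC table slot, and publish version and channel
 * info through the IQK info registers.
 */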
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
                             enum rtw89_phy_idx phy, u8 path)
{
        struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
        struct rtw89_hal *hal = &rtwdev->hal;
        u32 reg_rf18 = 0x0, reg_35c = 0x0;
        u8 idx = 0;
        u8 get_empty_table = false;

        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
        for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
                if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
                        get_empty_table = true;
                        break;
                }
        }
        if (!get_empty_table) {
                idx = iqk_info->iqk_table_idx[path] + 1;
                if (idx > RTW89_IQK_CHS_NR - 1)
                        idx = 0;
        }
        reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
        reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);

        iqk_info->iqk_band[path] = hal->current_band_type;
        iqk_info->iqk_bw[path] = hal->current_band_width;
        iqk_info->iqk_ch[path] = hal->current_channel;

        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
                    iqk_info->iqk_band[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
                    path, iqk_info->iqk_bw[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
                    path, iqk_info->iqk_ch[path]);
        rtw89_debug(rtwdev, RTW89_DBG_RFK,
                    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
                    rtwdev->dbcc_en ? "on" : "off",
                    iqk_info->iqk_band[path] == 0 ? "2G" :
                    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
                    iqk_info->iqk_ch[path],
                    iqk_info->iqk_bw[path] == 0 ? "20M" :
                    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
        if (reg_35c == 0x01)
                iqk_info->syn1to2 = 0x1;
        else
                iqk_info->syn1to2 = 0x0;

        rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852A_IQK_VER);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x000f << (path * 16),
                               (u8)iqk_info->iqk_band[path]);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x00f0 << (path * 16),
                               (u8)iqk_info->iqk_bw[path]);
        rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0xff00 << (path * 16),
                               (u8)iqk_info->iqk_ch[path]);

        rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x000000ff, RTW8852A_NCTL_VER);
}

static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
                           u8 path)
{
        _iqk_by_path(rtwdev, phy_idx, path);
}

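/* Restore NCTL/KIP and RF state after IQK and load the narrow-band
 * CFIR results selected above.
 */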
1424 static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
1425 {
1426 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1427
1428 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
1429 iqk_info->nb_txcfir[path]);
1430 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
1431 iqk_info->nb_rxcfir[path]);
1432 rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
1433 rtw89_phy_write32_clr(rtwdev, R_MDPK_RX_DCK, MASKDWORD);
1434 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
1435 rtw89_phy_write32_clr(rtwdev, R_KPATH_CFG, MASKDWORD);
1436 rtw89_phy_write32_clr(rtwdev, R_GAPK, B_GAPK_ADR);
1437 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
1438 rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
1439 rtw89_phy_write32_mask(rtwdev, R_CFIR_MAP + (path << 8), MASKDWORD, 0xe4e4e4e4);
1440 rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
1441 rtw89_phy_write32_clr(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW);
1442 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD, 0x00000002);
1443 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
1444 rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x0);
1445 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
1446 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1447 rtw89_write_rf(rtwdev, path, RR_TXRSV, RR_TXRSV_GAPK, 0x0);
1448 rtw89_write_rf(rtwdev, path, RR_BIAS, RR_BIAS_GAPK, 0x0);
1449 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1450 }
1451
1452 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
1453 enum rtw89_phy_idx phy_idx, u8 path)
1454 {
1455 const struct rtw89_rfk_tbl *tbl;
1456
1457 switch (_kpath(rtwdev, phy_idx)) {
1458 case RF_A:
1459 tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
1460 break;
1461 case RF_B:
1462 tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;
1463 break;
1464 default:
1465 tbl = &rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
1466 break;
1467 }
1468
1469 rtw89_rfk_parser(rtwdev, tbl);
1470 }
1471
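/*
 * Prepare KIP/NCTL for an IQK run. In DBCC mode the coefficient and
 * CFIR LUT selection follows the path number; otherwise it follows the
 * per-path table index. RR_RSV1_RST is cleared here and set back in
 * _iqk_restore().
 */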
1472 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
1473 {
1474 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1475 u8 idx = iqk_info->iqk_table_idx[path];
1476
1477 if (rtwdev->dbcc_en) {
1478 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
1479 B_COEF_SEL_IQC, path & 0x1);
1480 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1481 B_CFIR_LUT_G2, path & 0x1);
1482 } else {
1483 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
1484 B_COEF_SEL_IQC, idx);
1485 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1486 B_CFIR_LUT_G2, idx);
1487 }
1488 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1489 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
1490 rtw89_phy_write32_clr(rtwdev, R_NCTL_RW, MASKDWORD);
1491 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
1492 rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, MASKDWORD, 0x00200000);
1493 rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
1494 rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD);
1495 }
1496
1497 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
1498 enum rtw89_phy_idx phy_idx, u8 path)
1499 {
1500 const struct rtw89_rfk_tbl *tbl;
1501
1502 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
1503
1504 switch (_kpath(rtwdev, phy_idx)) {
1505 case RF_A:
1506 tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
1507 break;
1508 case RF_B:
1509 tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
1510 break;
1511 default:
1512 tbl = &rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
1513 break;
1514 }
1515
1516 rtw89_rfk_parser(rtwdev, tbl);
1517 }
1518
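/*
 * DBCC variant of the IQK flow: with dual-band concurrent operation
 * each RF path is driven by its own PHY (path 0 -> PHY0, path 1 ->
 * PHY1), so the per-path sequence below runs against the matching PHY.
 */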
1519 static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
1520 {
1521 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1522 u8 phy_idx = 0x0;
1523
1524 iqk_info->iqk_times++;
1525
1526 if (path == 0x0)
1527 phy_idx = RTW89_PHY_0;
1528 else
1529 phy_idx = RTW89_PHY_1;
1530
1531 _iqk_get_ch_info(rtwdev, phy_idx, path);
1532 _iqk_macbb_setting(rtwdev, phy_idx, path);
1533 _iqk_preset(rtwdev, path);
1534 _iqk_start_iqk(rtwdev, phy_idx, path);
1535 _iqk_restore(rtwdev, path);
1536 _iqk_afebb_restore(rtwdev, phy_idx, path);
1537 }
1538
1539 static void _iqk_track(struct rtw89_dev *rtwdev)
1540 {
1541 struct rtw89_iqk_info *iqk = &rtwdev->iqk;
1542 u8 path = 0x0;
1543 u8 cur_ther;
1544
1545 if (iqk->iqk_band[0] == RTW89_BAND_2G)
1546 return;
1547 if (iqk->iqk_bw[0] < RTW89_CHANNEL_WIDTH_80)
1548 return;
1550
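/* check path 0 only; a thermal delta beyond RTW8852A_IQK_THR_REK flags a re-calibration */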
1551 for (path = 0; path < 1; path++) {
1552 cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
1553
1554 if (abs(cur_ther - iqk->thermal[path]) > RTW8852A_IQK_THR_REK)
1555 iqk->thermal_rek_en = true;
1556 else
1557 iqk->thermal_rek_en = false;
1558 }
1559 }
1560
1561 static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
1562 {
1563 u32 rf_reg5, rck_val = 0;
1564 u32 val;
1565 int ret;
1566
1567 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
1568
1569 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
1570
1571 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1572 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1573
1574 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
1575 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
1577
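/* RCK trigger */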
1578 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
1579
1580 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
1581 false, rtwdev, path, 0x1c, BIT(3));
1582 if (ret)
1583 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");
1584
1585 rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
1586 rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
1588
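/* fixed RCK output offset; the RR_RFC_CKEN pulse below is assumed to latch it */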
1589 rtw89_write_rf(rtwdev, path, RR_RCKO, RR_RCKO_OFF, 0x4);
1590
1591 rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x1);
1592 rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x0);
1593
1594 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
1595
1596 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1597 "[RCK] RF 0x1b / 0x1c / 0x1d = 0x%x / 0x%x / 0x%x\n",
1598 rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
1599 rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK),
1600 rtw89_read_rf(rtwdev, path, RR_RCKO, RFREG_MASK));
1601 }
1602
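/*
 * One-time IQK bookkeeping init: clear the version/report word and the
 * per-channel, per-path fail flags. Safe to call repeatedly; it returns
 * early once is_iqk_init is set.
 */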
1603 static void _iqk_init(struct rtw89_dev *rtwdev)
1604 {
1605 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1606 u8 ch, path;
1607
1608 rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
1609 if (iqk_info->is_iqk_init)
1610 return;
1611
1612 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1613 iqk_info->is_iqk_init = true;
1614 iqk_info->is_nbiqk = false;
1615 iqk_info->iqk_fft_en = false;
1616 iqk_info->iqk_sram_en = false;
1617 iqk_info->iqk_cfir_en = false;
1618 iqk_info->iqk_xym_en = false;
1619 iqk_info->thermal_rek_en = false;
1620 iqk_info->iqk_times = 0x0;
1621
1622 for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
1623 iqk_info->iqk_channel[ch] = 0x0;
1624 for (path = 0; path < RTW8852A_IQK_SS; path++) {
1625 iqk_info->lok_cor_fail[ch][path] = false;
1626 iqk_info->lok_fin_fail[ch][path] = false;
1627 iqk_info->iqk_tx_fail[ch][path] = false;
1628 iqk_info->iqk_rx_fail[ch][path] = false;
1629 iqk_info->iqk_mcc_ch[ch][path] = 0x0;
1630 iqk_info->iqk_table_idx[path] = 0x0;
1631 }
1632 }
1633 }
1634
1635 static void _doiqk(struct rtw89_dev *rtwdev, bool force,
1636 enum rtw89_phy_idx phy_idx, u8 path)
1637 {
1638 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1639 u32 backup_bb_val[BACKUP_BB_REGS_NR];
1640 u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
1641 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
1642
1643 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
1644
1645 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1646 "[IQK]==========IQK strat!!!!!==========\n");
1647 iqk_info->iqk_times++;
1648 iqk_info->kcount = 0;
1649 iqk_info->version = RTW8852A_IQK_VER;
1650
1651 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
1652 _iqk_get_ch_info(rtwdev, phy_idx, path);
1653 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
1654 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
1655 _iqk_macbb_setting(rtwdev, phy_idx, path);
1656 _iqk_preset(rtwdev, path);
1657 _iqk_start_iqk(rtwdev, phy_idx, path);
1658 _iqk_restore(rtwdev, path);
1659 _iqk_afebb_restore(rtwdev, phy_idx, path);
1660 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
1661 _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
1662 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
1663 }
1664
1665 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
1666 {
1667 switch (_kpath(rtwdev, phy_idx)) {
1668 case RF_A:
1669 _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1670 break;
1671 case RF_B:
1672 _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1673 break;
1674 case RF_AB:
1675 _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1676 _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1677 break;
1678 default:
1679 break;
1680 }
1681 }
1682
1683 #define RXDCK_VER_8852A 0xe
1684
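/*
 * RX DC offset calibration for one path. When is_afe is set, the ADC
 * clock and DC-cancellation blocks are first taken over through the BB
 * debug controls; the calibration itself is kicked by toggling
 * RR_DCK_LV and is given 600us to settle (fsleep below).
 */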
1685 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1686 enum rtw89_rf_path path, bool is_afe)
1687 {
1688 u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
1689 u32 ori_val;
1690
1691 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1692 "[RX_DCK] ==== S%d RX DCK (by %s)====\n",
1693 path, is_afe ? "AFE" : "RFC");
1694
1695 ori_val = rtw89_phy_read32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD);
1696
1697 if (is_afe) {
1698 rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
1699 rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
1700 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
1701 B_P0_RXCK_VAL, 0x3);
1702 rtw89_phy_write32_set(rtwdev, R_S0_RXDC2 + (path << 13), B_S0_RXDC2_MEN);
1703 rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2 + (path << 13),
1704 B_S0_RXDC2_AVG, 0x3);
1705 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
1706 rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK);
1707 rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
1708 rtw89_phy_write32_set(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
1709 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_CRXBB, 0x1);
1710 }
1711
1712 rtw89_write_rf(rtwdev, path, RR_DCK2, RR_DCK2_CYCLE, 0x3f);
1713 rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_SEL, is_afe);
1714
1715 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_START);
1716
1717 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
1718 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
1719
1720 fsleep(600);
1721
1722 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_STOP);
1723
1724 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
1725
1726 if (is_afe) {
1727 rtw89_phy_write32_clr(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
1728 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
1729 MASKDWORD, ori_val);
1730 }
1731 }
1732
1733 static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1734 bool is_afe)
1735 {
1736 u8 path, kpath, dck_tune;
1737 u32 rf_reg5;
1738 u32 addr;
1739
1740 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1741 "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
1742 RXDCK_VER_8852A, rtwdev->hal.cv);
1743
1744 kpath = _kpath(rtwdev, phy);
1745
1746 for (path = 0; path < 2; path++) {
1747 if (!(kpath & BIT(path)))
1748 continue;
1749
1750 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
1751 dck_tune = (u8)rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);
1752
1753 if (rtwdev->is_tssi_mode[path]) {
1754 addr = 0x5818 + (path << 13);
1755
1756 rtw89_phy_write32_set(rtwdev, addr, BIT(30));
1757 }
1758
1759 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1760 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
1761 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1762 _set_rx_dck(rtwdev, phy, path, is_afe);
1763 rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
1764 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
1765
1766 if (rtwdev->is_tssi_mode[path]) {
1767 addr = 0x5818 + (path << 13);
1768
1769 rtw89_phy_write32_clr(rtwdev, addr, BIT(30));
1770 }
1771 }
1772 }
1773
1774 #define RTW8852A_RF_REL_VERSION 34
1775 #define RTW8852A_DPK_VER 0x10
1776 #define RTW8852A_DPK_TH_AVG_NUM 4
1777 #define RTW8852A_DPK_RF_PATH 2
1778 #define RTW8852A_DPK_KIP_REG_NUM 2
1779
1780 enum rtw8852a_dpk_id {
1781 LBK_RXIQK = 0x06,
1782 SYNC = 0x10,
1783 MDPK_IDL = 0x11,
1784 MDPK_MPA = 0x12,
1785 GAIN_LOSS = 0x13,
1786 GAIN_CAL = 0x14,
1787 };
1788
1789 static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
1790 enum rtw89_rf_path path, bool is_bybb)
1791 {
1792 if (is_bybb)
1793 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1794 else
1795 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1796 }
1797
1798 static void _dpk_onoff(struct rtw89_dev *rtwdev,
1799 enum rtw89_rf_path path, bool off);
1800
1801 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, u32 *reg,
1802 u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM],
1803 u8 path)
1804 {
1805 u8 i;
1806
1807 for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
1808 reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev,
1809 reg[i] + (path << 8),
1810 MASKDWORD);
1811 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
1812 reg[i] + (path << 8), reg_bkup[path][i]);
1813 }
1814 }
1815
1816 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
1817 u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path)
1818 {
1819 u8 i;
1820
1821 for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
1822 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
1823 MASKDWORD, reg_bkup[path][i]);
1824 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
1825 reg[i] + (path << 8), reg_bkup[path][i]);
1826 }
1827 }
1828
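/*
 * Issue a single NCTL command to the DPK engine and poll 0xbff8 for the
 * 0x55 done pattern (up to 20ms). Command layout, as used below:
 *   dpk_cmd = (id << 8) | (0x19 + path * 0x10)
 * e.g. SYNC (0x10) on path B gives 0x1029.
 */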
1829 static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1830 enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
1831 {
1832 u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
1833 u16 dpk_cmd = 0x0;
1834 u32 val;
1835 int ret;
1836
1837 dpk_cmd = (u16)((id << 8) | (0x19 + (path << 4)));
1838
1839 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_START);
1840
1841 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
1842 rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
1843
1844 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1845 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
1846
1847 rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
1848
1849 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_STOP);
1850
1851 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1852 "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
1853 id == 0x06 ? "LBK_RXIQK" :
1854 id == 0x10 ? "SYNC" :
1855 id == 0x11 ? "MDPK_IDL" :
1856 id == 0x12 ? "MDPK_MPA" :
1857 id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
1858 dpk_cmd, ret);
1859
1860 if (ret) {
1861 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1862 "[DPK] one-shot over 20ms!!!!\n");
1863 return 1;
1864 }
1865
1866 return 0;
1867 }
1868
1869 static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
1870 enum rtw89_phy_idx phy,
1871 enum rtw89_rf_path path)
1872 {
1873 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
1874 _set_rx_dck(rtwdev, phy, path, false);
1875 }
1876
1877 static void _dpk_information(struct rtw89_dev *rtwdev,
1878 enum rtw89_phy_idx phy,
1879 enum rtw89_rf_path path)
1880 {
1881 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1882 struct rtw89_hal *hal = &rtwdev->hal;
1883
1884 u8 kidx = dpk->cur_idx[path];
1885
1886 dpk->bp[path][kidx].band = hal->current_band_type;
1887 dpk->bp[path][kidx].ch = hal->current_channel;
1888 dpk->bp[path][kidx].bw = hal->current_band_width;
1889
1890 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1891 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
1892 path, dpk->cur_idx[path], phy,
1893 rtwdev->is_tssi_mode[path] ? "on" : "off",
1894 rtwdev->dbcc_en ? "on" : "off",
1895 dpk->bp[path][kidx].band == 0 ? "2G" :
1896 dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
1897 dpk->bp[path][kidx].ch,
1898 dpk->bp[path][kidx].bw == 0 ? "20M" :
1899 dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
1900 }
1901
1902 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
1903 enum rtw89_phy_idx phy,
1904 enum rtw89_rf_path path, u8 kpath)
1905 {
1906 switch (kpath) {
1907 case RF_A:
1908 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl);
1909
1910 if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x0)
1911 rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
1912
1913 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl);
1914 break;
1915 case RF_B:
1916 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl);
1917
1918 if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x1)
1919 rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
1920
1921 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl);
1922 break;
1923 case RF_AB:
1924 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl);
1925 break;
1926 default:
1927 break;
1928 }
1929 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1930 "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
1931 }
1932
1933 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
1934 enum rtw89_phy_idx phy,
1935 enum rtw89_rf_path path, u8 kpath)
1936 {
1937 switch (kpath) {
1938 case RF_A:
1939 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl);
1940 break;
1941 case RF_B:
1942 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl);
1943 break;
1944 case RF_AB:
1945 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl);
1946 break;
1947 default:
1948 break;
1949 }
1950 rtw89_debug(rtwdev, RTW89_DBG_RFK,
1951 "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
1952 }
1953
1954 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
1955 enum rtw89_rf_path path, bool is_pause)
1956 {
1957 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
1958 B_P0_TSSI_TRK_EN, is_pause);
1959
1960 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
1961 is_pause ? "pause" : "resume");
1962 }
1963
1964 static void _dpk_kip_setting(struct rtw89_dev *rtwdev,
1965 enum rtw89_rf_path path, u8 kidx)
1966 {
1967 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
1968 rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x00093f3f);
1969 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
1970 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
1971 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG, B_DPK_CFG_IDX, 0x2);
1972 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path);
1973 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8) + (kidx << 2),
1974 MASKDWORD, 0x003f2e2e);
1975 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
1976 MASKDWORD, 0x005b5b5b);
1977
1978 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP setting for S%d[%d]!!\n",
1979 path, kidx);
1980 }
1981
1982 static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
1983 enum rtw89_rf_path path)
1984 {
1985 rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
1986 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
1987 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
1988 rtw89_phy_write32_clr(rtwdev, R_KIP_CLK, MASKDWORD);
1989
1990 if (rtwdev->hal.cv > CHIP_CBV)
1991 rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), BIT(15), 0x1);
1992
1993 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
1994 }
1995
1996 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
1997 enum rtw89_phy_idx phy,
1998 enum rtw89_rf_path path)
1999 {
2000 u8 cur_rxbb;
2001
2002 cur_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
2003
2004 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl);
2005
2006 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
2007 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
2008 rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x2);
2009 rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK,
2010 rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK));
2011 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
2012 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
2013 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
2014
2015 fsleep(70);
2016
2017 rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTL, 0x1f);
2018
2019 if (cur_rxbb <= 0xa)
2020 rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x3);
2021 else if (cur_rxbb <= 0x10 && cur_rxbb >= 0xb)
2022 rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x1);
2023 else
2024 rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x0);
2025
2026 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
2027
2028 _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
2029
2030 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
2031 rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
2032
2033 rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
2034 rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x0);
2035 rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
2036 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_DPK);
2037
2038 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl);
2039 }
2040
2041 static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx,
2042 enum rtw89_rf_path path)
2043 {
2044 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2045
2046 dpk->bp[path][kidx].ther_dpk =
2047 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
2048
2049 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
2050 dpk->bp[path][kidx].ther_dpk);
2051 }
2052
2053 static u8 _dpk_set_tx_pwr(struct rtw89_dev *rtwdev, u8 gain,
2054 enum rtw89_rf_path path)
2055 {
2056 u8 txagc_ori = 0x38;
2057
2058 rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc_ori);
2059
2060 return txagc_ori;
2061 }
2062
2063 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
2064 enum rtw89_rf_path path, u8 kidx)
2065 {
2066 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2067
2068 if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
2069 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x280b);
2070 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0);
2071 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
2072 rtw89_write_rf(rtwdev, path, RR_MIXER, RR_MIXER_GN, 0x0);
2073 } else {
2074 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x282e);
2075 rtw89_write_rf(rtwdev, path, RR_BIASA2, RR_BIASA2_LB, 0x7);
2076 rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW, 0x3);
2077 rtw89_write_rf(rtwdev, path, RR_RXA, RR_RXA_DPK, 0x3);
2078 }
2079 rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
2080 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
2081 rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
2082
2083 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2084 "[DPK] RF 0x0/0x1/0x1a = 0x%x/ 0x%x/ 0x%x\n",
2085 rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
2086 rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK),
2087 rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
2088 }
2089
2090 static void _dpk_manual_txcfir(struct rtw89_dev *rtwdev,
2091 enum rtw89_rf_path path, bool is_manual)
2092 {
2093 u8 tmp_pad, tmp_txbb;
2094
2095 if (is_manual) {
2096 rtw89_phy_write32_mask(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN, 0x1);
2097 tmp_pad = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_PAD);
2098 rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
2099 B_RFGAIN_PAD, tmp_pad);
2100
2101 tmp_txbb = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_BB);
2102 rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
2103 B_RFGAIN_TXBB, tmp_txbb);
2104
2105 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8),
2106 B_LOAD_COEF_CFIR, 0x1);
2107 rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8),
2108 B_LOAD_COEF_CFIR);
2109
2110 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), BIT(1), 0x1);
2111
2112 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2113 "[DPK] PAD_man / TXBB_man = 0x%x / 0x%x\n", tmp_pad,
2114 tmp_txbb);
2115 } else {
2116 rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
2117 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2118 "[DPK] disable manual switch TXCFIR\n");
2119 }
2120 }
2121
2122 static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
2123 enum rtw89_rf_path path, bool is_bypass)
2124 {
2125 if (is_bypass) {
2126 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
2127 B_RXIQC_BYPASS2, 0x1);
2128 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
2129 B_RXIQC_BYPASS, 0x1);
2130 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2131 "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
2132 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
2133 MASKDWORD));
2134 } else {
2135 rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
2136 rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
2137 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2138 "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
2139 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
2140 MASKDWORD));
2141 }
2142 }
2143
2144 static
2145 void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2146 {
2147 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2148
2149 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2150 rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
2151 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
2152 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2153 else
2154 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2155
2156 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2157 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2158 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2159 }
2160
2161 static void _dpk_table_select(struct rtw89_dev *rtwdev,
2162 enum rtw89_rf_path path, u8 kidx, u8 gain)
2163 {
2164 u8 val;
2165
2166 val = 0x80 + kidx * 0x20 + gain * 0x10;
2167 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
2168 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2169 "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
2170 gain, val);
2171 }
2172
2173 static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
2174 enum rtw89_rf_path path)
2175 {
2176 #define DPK_SYNC_TH_DC_I 200
2177 #define DPK_SYNC_TH_DC_Q 200
2178 #define DPK_SYNC_TH_CORR 170
2179 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2180 u16 dc_i, dc_q;
2181 u8 corr_val, corr_idx;
2182
2183 rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
2184
2185 corr_idx = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
2186 corr_val = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
2187
2188 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2189 "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx,
2190 corr_val);
2191
2192 dpk->corr_idx[path][0] = corr_idx;
2193 dpk->corr_val[path][0] = corr_val;
2194
2195 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
2196
2197 dc_i = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2198 dc_q = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
2199
2200 dc_i = abs(sign_extend32(dc_i, 11));
2201 dc_q = abs(sign_extend32(dc_q, 11));
2202
2203 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
2204 path, dc_i, dc_q);
2205
2206 dpk->dc_i[path][0] = dc_i;
2207 dpk->dc_q[path][0] = dc_q;
2208
2209 if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
2210 corr_val < DPK_SYNC_TH_CORR)
2211 return true;
2212 else
2213 return false;
2214 }
2215
2216 static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2217 enum rtw89_rf_path path, u8 kidx)
2218 {
2219 _dpk_tpg_sel(rtwdev, path, kidx);
2220 _dpk_one_shot(rtwdev, phy, path, SYNC);
2221 return _dpk_sync_check(rtwdev, path);
2222 }
2223
2224 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
2225 {
2226 u16 dgain = 0x0;
2227
2228 rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
2229
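/* dummy read of the sync-error bit before fetching dgain (assumed to advance the report mux) */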
2230 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
2231
2232 dgain = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2233
2234 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain,
2235 dgain);
2236
2237 return dgain;
2238 }
2239
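/*
 * Map the DGain readback onto a TXAGC offset:
 *   dgain >= 0x783   -> +6
 *   0x551 .. 0x782   -> +3
 *   0x3c4 .. 0x550   ->  0
 *   0x2aa .. 0x3c3   -> -3
 *   0x1e3 .. 0x2a9   -> -6
 *   0x156 .. 0x1e2   -> -9
 *   dgain <= 0x155   -> -12
 */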
2240 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2241 {
2242 s8 offset;
2243
2244 if (dgain >= 0x783)
2245 offset = 0x6;
2246 else if (dgain <= 0x782 && dgain >= 0x551)
2247 offset = 0x3;
2248 else if (dgain <= 0x550 && dgain >= 0x3c4)
2249 offset = 0x0;
2250 else if (dgain <= 0x3c3 && dgain >= 0x2aa)
2251 offset = -3;
2252 else if (dgain <= 0x2a9 && dgain >= 0x1e3)
2253 offset = -6;
2254 else if (dgain <= 0x1e2 && dgain >= 0x156)
2255 offset = -9;
2256 else if (dgain <= 0x155)
2257 offset = -12;
2258 else
2259 offset = 0x0;
2260
2261 return offset;
2262 }
2263
2264 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
2265 {
2266 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
2267 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
2268 return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
2269 }
2270
2271 static void _dpk_gainloss(struct rtw89_dev *rtwdev,
2272 enum rtw89_phy_idx phy, enum rtw89_rf_path path,
2273 u8 kidx)
2274 {
2275 _dpk_table_select(rtwdev, path, kidx, 1);
2276 _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
2277 }
2278
2279 #define DPK_TXAGC_LOWER 0x2e
2280 #define DPK_TXAGC_UPPER 0x3f
2281 #define DPK_TXAGC_INVAL 0xff
2282
2283 static u8 _dpk_set_offset(struct rtw89_dev *rtwdev,
2284 enum rtw89_rf_path path, s8 gain_offset)
2285 {
2286 u8 txagc;
2287
2288 txagc = (u8)rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK);
2289
2290 if (txagc - gain_offset < DPK_TXAGC_LOWER)
2291 txagc = DPK_TXAGC_LOWER;
2292 else if (txagc - gain_offset > DPK_TXAGC_UPPER)
2293 txagc = DPK_TXAGC_UPPER;
2294 else
2295 txagc = txagc - gain_offset;
2296
2297 rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc);
2298
2299 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
2300 gain_offset, txagc);
2301 return txagc;
2302 }
2303
2304 enum dpk_agc_step {
2305 DPK_AGC_STEP_SYNC_DGAIN,
2306 DPK_AGC_STEP_GAIN_ADJ,
2307 DPK_AGC_STEP_GAIN_LOSS_IDX,
2308 DPK_AGC_STEP_GL_GT_CRITERION,
2309 DPK_AGC_STEP_GL_LT_CRITERION,
2310 DPK_AGC_STEP_SET_TX_GAIN,
2311 };
2312
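/*
 * Read back the PA scan buffer. In check mode, compare the power of the
 * first sample (index 0x00) against the last (0x1f) and return 1 when
 * |s0|^2 >= 1.6 * |s31|^2, which the AGC loop uses to treat a zero
 * gain-loss index as over-criterion.
 */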
2313 static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
2314 {
2315 u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
2316 u8 i;
2317
2318 rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_pas_read_defs_tbl);
2319
2320 if (is_check) {
2321 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
2322 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2323 val1_i = abs(sign_extend32(val1_i, 11));
2324 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2325 val1_q = abs(sign_extend32(val1_q, 11));
2326 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
2327 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2328 val2_i = abs(sign_extend32(val2_i, 11));
2329 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2330 val2_q = abs(sign_extend32(val2_q, 11));
2331
2332 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
2333 phy_div(val1_i * val1_i + val1_q * val1_q,
2334 val2_i * val2_i + val2_q * val2_q));
2335
2336 } else {
2337 for (i = 0; i < 32; i++) {
2338 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
2339 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2340 "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
2341 rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
2342 }
2343 }
2344 if ((val1_i * val1_i + val1_q * val1_q) >=
2345 ((val2_i * val2_i + val2_q * val2_q) * 8 / 5))
2346 return 1;
2347 else
2348 return 0;
2349 }
2350
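/*
 * DPK AGC state machine: iterate SYNC/DGAIN -> RXBB gain adjust ->
 * gain-loss measurement, lowering TXAGC by 3 when the measured loss
 * exceeds the criterion and raising it by 2 when it reads zero, for at
 * most DPK_AGC_ADJ_LMT rounds. Returns the final TXAGC, or
 * DPK_TXAGC_INVAL if sync failed.
 */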
2351 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2352 enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
2353 bool loss_only)
2354 {
2355 #define DPK_AGC_ADJ_LMT 6
2356 #define DPK_DGAIN_UPPER 1922
2357 #define DPK_DGAIN_LOWER 342
2358 #define DPK_RXBB_UPPER 0x1f
2359 #define DPK_RXBB_LOWER 0
2360 #define DPK_GL_CRIT 7
2361 u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
2362 u8 agc_cnt = 0;
2363 bool limited_rxbb = false;
2364 s8 offset = 0;
2365 u16 dgain = 0;
2366 u8 step = DPK_AGC_STEP_SYNC_DGAIN;
2367 bool goout = false;
2368
2369 tmp_txagc = init_txagc;
2370
2371 do {
2372 switch (step) {
2373 case DPK_AGC_STEP_SYNC_DGAIN:
2374 if (_dpk_sync(rtwdev, phy, path, kidx)) {
2375 tmp_txagc = DPK_TXAGC_INVAL;
2376 goout = true;
2377 break;
2378 }
2379
2380 dgain = _dpk_dgain_read(rtwdev);
2381
2382 if (loss_only || limited_rxbb)
2383 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2384 else
2385 step = DPK_AGC_STEP_GAIN_ADJ;
2386 break;
2387
2388 case DPK_AGC_STEP_GAIN_ADJ:
2389 tmp_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
2390 offset = _dpk_dgain_mapping(rtwdev, dgain);
2391
2392 if (tmp_rxbb + offset > DPK_RXBB_UPPER) {
2393 tmp_rxbb = DPK_RXBB_UPPER;
2394 limited_rxbb = true;
2395 } else if (tmp_rxbb + offset < DPK_RXBB_LOWER) {
2396 tmp_rxbb = DPK_RXBB_LOWER;
2397 limited_rxbb = true;
2398 } else {
2399 tmp_rxbb = tmp_rxbb + offset;
2400 }
2401
2402 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
2403 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2404 "[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
2405 tmp_rxbb);
2406 if (offset != 0 || agc_cnt == 0) {
2407 if (rtwdev->hal.current_band_width < RTW89_CHANNEL_WIDTH_80)
2408 _dpk_bypass_rxcfir(rtwdev, path, true);
2409 else
2410 _dpk_lbk_rxiqk(rtwdev, phy, path);
2411 }
2412 if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
2413 step = DPK_AGC_STEP_SYNC_DGAIN;
2414 else
2415 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2416
2417 agc_cnt++;
2418 break;
2419
2420 case DPK_AGC_STEP_GAIN_LOSS_IDX:
2421 _dpk_gainloss(rtwdev, phy, path, kidx);
2422 tmp_gl_idx = _dpk_gainloss_read(rtwdev);
2423
2424 if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
2425 tmp_gl_idx > DPK_GL_CRIT)
2426 step = DPK_AGC_STEP_GL_GT_CRITERION;
2427 else if (tmp_gl_idx == 0)
2428 step = DPK_AGC_STEP_GL_LT_CRITERION;
2429 else
2430 step = DPK_AGC_STEP_SET_TX_GAIN;
2431 break;
2432
2433 case DPK_AGC_STEP_GL_GT_CRITERION:
2434 if (tmp_txagc == DPK_TXAGC_LOWER) {
2435 goout = true;
2436 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2437 "[DPK] Txagc@lower bound!!\n");
2438 } else {
2439 tmp_txagc = _dpk_set_offset(rtwdev, path, 3);
2440 }
2441 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2442 agc_cnt++;
2443 break;
2444
2445 case DPK_AGC_STEP_GL_LT_CRITERION:
2446 if (tmp_txagc == DPK_TXAGC_UPPER) {
2447 goout = true;
2448 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2449 "[DPK] Txagc@upper bound!!\n");
2450 } else {
2451 tmp_txagc = _dpk_set_offset(rtwdev, path, -2);
2452 }
2453 step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2454 agc_cnt++;
2455 break;
2456
2457 case DPK_AGC_STEP_SET_TX_GAIN:
2458 tmp_txagc = _dpk_set_offset(rtwdev, path, tmp_gl_idx);
2459 goout = true;
2460 agc_cnt++;
2461 break;
2462
2463 default:
2464 goout = true;
2465 break;
2466 }
2467 } while (!goout && (agc_cnt < DPK_AGC_ADJ_LMT));
2468
2469 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2470 "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
2471 tmp_rxbb);
2472
2473 return tmp_txagc;
2474 }
2475
2476 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
2477 {
2478 switch (order) {
2479 case 0:
2480 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2481 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
2482 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
2483 break;
2484 case 1:
2485 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2486 rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2487 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2488 break;
2489 case 2:
2490 rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
2491 rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
2492 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
2493 break;
2494 default:
2495 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2496 "[DPK] Wrong MDPD order!!(0x%x)\n", order);
2497 break;
2498 }
2499
2500 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2501 "[DPK] Set MDPD order to 0x%x for IDL\n", order);
2502 }
2503
2504 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2505 enum rtw89_rf_path path, u8 kidx, u8 gain)
2506 {
2507 _dpk_set_mdpd_para(rtwdev, 0x0);
2508 _dpk_table_select(rtwdev, path, kidx, 1);
2509 _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
2510 }
2511
2512 static void _dpk_fill_result(struct rtw89_dev *rtwdev,
2513 enum rtw89_rf_path path, u8 kidx, u8 gain,
2514 u8 txagc)
2515 {
2516 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2517
2518 u16 pwsf = 0x78;
2519 u8 gs = 0x5b;
2520
2521 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
2522
2523 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2524 "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
2525 pwsf, gs);
2526
2527 dpk->bp[path][kidx].txagc_dpk = txagc;
2528 rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
2529 0x3F << ((gain << 3) + (kidx << 4)), txagc);
2530
2531 dpk->bp[path][kidx].pwsf = pwsf;
2532 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2533 0x1FF << (gain << 4), pwsf);
2534
2535 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
2536 rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD);
2537
2538 dpk->bp[path][kidx].gs = gs;
2539 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2540 MASKDWORD, 0x065b5b5b);
2541
2542 rtw89_phy_write32_clr(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD);
2543
2544 rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL);
2545 }
2546
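/*
 * Look for a stored DPK result matching the current band/channel; on a
 * hit, point the coefficient selector at that backup slot and report
 * the path as reloaded so the full calibration can be skipped.
 */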
2547 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2548 enum rtw89_rf_path path)
2549 {
2550 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2551 bool is_reload = false;
2552 u8 idx, cur_band, cur_ch;
2553
2554 cur_band = rtwdev->hal.current_band_type;
2555 cur_ch = rtwdev->hal.current_channel;
2556
2557 for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2558 if (cur_band != dpk->bp[path][idx].band ||
2559 cur_ch != dpk->bp[path][idx].ch)
2560 continue;
2561
2562 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2563 B_COEF_SEL_MDPD, idx);
2564 dpk->cur_idx[path] = idx;
2565 is_reload = true;
2566 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2567 "[DPK] reload S%d[%d] success\n", path, idx);
2568 }
2569
2570 return is_reload;
2571 }
2572
2573 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2574 enum rtw89_rf_path path, u8 gain)
2575 {
2576 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2577 u8 txagc = 0, kidx = dpk->cur_idx[path];
2578 bool is_fail = false;
2579
2580 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2581 "[DPK] ========= S%d[%d] DPK Start =========\n", path,
2582 kidx);
2583
2584 _rf_direct_cntrl(rtwdev, path, false);
2585 txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
2586 _dpk_rf_setting(rtwdev, gain, path, kidx);
2587 _dpk_rx_dck(rtwdev, phy, path);
2588
2589 _dpk_kip_setting(rtwdev, path, kidx);
2590 _dpk_manual_txcfir(rtwdev, path, true);
2591 txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
2592 if (txagc == DPK_TXAGC_INVAL)
2593 is_fail = true;
2594 _dpk_get_thermal(rtwdev, kidx, path);
2595
2596 _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
2597 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
2598 _dpk_fill_result(rtwdev, path, kidx, gain, txagc);
2599 _dpk_manual_txcfir(rtwdev, path, false);
2600
2601 if (!is_fail)
2602 dpk->bp[path][kidx].path_ok = true;
2603 else
2604 dpk->bp[path][kidx].path_ok = false;
2605
2606 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2607 is_fail ? "Check" : "Success");
2608
2609 return is_fail;
2610 }
2611
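/*
 * Top-level DPK sequencing for the requested paths: try reloading
 * cached results first, otherwise back up BB/RF/KIP state, run
 * _dpk_main() per path, and restore everything (pausing TSSI tracking
 * around the calibration when the path is in TSSI mode).
 */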
2612 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
2613 enum rtw89_phy_idx phy, u8 kpath)
2614 {
2615 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2616 u32 backup_bb_val[BACKUP_BB_REGS_NR];
2617 u32 backup_rf_val[RTW8852A_DPK_RF_PATH][BACKUP_RF_REGS_NR];
2618 u32 kip_bkup[RTW8852A_DPK_RF_PATH][RTW8852A_DPK_KIP_REG_NUM] = {{0}};
2619 u32 kip_reg[] = {R_RXIQC, R_IQK_RES};
2620 u8 path;
2621 bool is_fail = true, reloaded[RTW8852A_DPK_RF_PATH] = {false};
2622
2623 if (dpk->is_dpk_reload_en) {
2624 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2625 if (!(kpath & BIT(path)))
2626 continue;
2627
2628 reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
2629 if (!reloaded[path] && dpk->bp[path][0].ch != 0)
2630 dpk->cur_idx[path] = !dpk->cur_idx[path];
2631 else
2632 _dpk_onoff(rtwdev, path, false);
2633 }
2634 } else {
2635 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++)
2636 dpk->cur_idx[path] = 0;
2637 }
2638
2639 if ((kpath == RF_A && reloaded[RF_PATH_A]) ||
2640 (kpath == RF_B && reloaded[RF_PATH_B]) ||
2641 (kpath == RF_AB && reloaded[RF_PATH_A] && reloaded[RF_PATH_B]))
2642 return;
2643
2644 _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
2645
2646 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2647 if (!(kpath & BIT(path)) || reloaded[path])
2648 continue;
2649 if (rtwdev->is_tssi_mode[path])
2650 _dpk_tssi_pause(rtwdev, path, true);
2651 _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
2652 _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
2653 _dpk_information(rtwdev, phy, path);
2654 }
2655
2656 _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
2657
2658 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2659 if (!(kpath & BIT(path)) || reloaded[path])
2660 continue;
2661
2662 is_fail = _dpk_main(rtwdev, phy, path, 1);
2663 _dpk_onoff(rtwdev, path, is_fail);
2664 }
2665
2666 _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
2667 _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
2668
2669 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2670 if (!(kpath & BIT(path)) || reloaded[path])
2671 continue;
2672
2673 _dpk_kip_restore(rtwdev, path);
2674 _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
2675 _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
2676 if (rtwdev->is_tssi_mode[path])
2677 _dpk_tssi_pause(rtwdev, path, false);
2678 }
2679 }
2680
2681 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2682 {
2683 struct rtw89_fem_info *fem = &rtwdev->fem;
2684
2685 if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
2686 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2687 "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2688 return true;
2689 } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
2690 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2691 "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2692 return true;
2693 }
2694
2695 return false;
2696 }
2697
2698 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2699 {
2700 u8 path, kpath;
2701
2702 kpath = _kpath(rtwdev, phy);
2703
2704 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2705 if (kpath & BIT(path))
2706 _dpk_onoff(rtwdev, path, true);
2707 }
2708 }
2709
2710 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
2711 {
2712 rtw89_debug(rtwdev, RTW89_DBG_RFK,
2713 "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
2714 RTW8852A_DPK_VER, rtwdev->hal.cv,
2715 RTW8852A_RF_REL_VERSION);
2716
2717 if (_dpk_bypass_check(rtwdev, phy))
2718 _dpk_force_bypass(rtwdev, phy);
2719 else
2720 _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
2721 }
2722
2723 static void _dpk_onoff(struct rtw89_dev *rtwdev,
2724 enum rtw89_rf_path path, bool off)
2725 {
2726 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2727 u8 val, kidx = dpk->cur_idx[path];
2728
2729 val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
2730
2731 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2732 MASKBYTE3, 0x6 | val);
2733
2734 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
2735 kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
2736 }
2737
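/*
 * Periodic DPK tracking: re-scale the stored power scaling factor
 * (pwsf) against thermal drift. In TSSI mode, roughly:
 *   pwsf = pwsf_k + (txagc_bb_tp - txagc_bb) + (txagc_ofst + delta_ther)
 *          + extra_ofst
 * where the txagc_bb terms drop out when B_P0_TXDPD is set; otherwise
 * pwsf = (pwsf_k + delta_ther) & 0x1ff. delta_ther is the DPK-time
 * thermal minus the current reading, scaled 3/2 on 2G and 5/2 on 5G.
 */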
2738 static void _dpk_track(struct rtw89_dev *rtwdev)
2739 {
2740 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2741 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2742 u8 path, kidx;
2743 u8 trk_idx = 0, txagc_rf = 0;
2744 s8 txagc_bb = 0, txagc_bb_tp = 0, ini_diff = 0, txagc_ofst = 0;
2745 u16 pwsf[2];
2746 u8 cur_ther;
2747 s8 delta_ther[2] = {0};
2748
2749 for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
2750 kidx = dpk->cur_idx[path];
2751
2752 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2753 "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
2754 path, kidx, dpk->bp[path][kidx].ch);
2755
2756 cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
2757
2758 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2759 "[DPK_TRK] thermal now = %d\n", cur_ther);
2760
2761 if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
2762 delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;
2763
2764 if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
2765 delta_ther[path] = delta_ther[path] * 3 / 2;
2766 else
2767 delta_ther[path] = delta_ther[path] * 5 / 2;
2768
2769 txagc_rf = (u8)rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
2770 RR_MODOPT_M_TXPWR);
2771
2772 if (rtwdev->is_tssi_mode[path]) {
2773 trk_idx = (u8)rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);
2774
2775 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2776 "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
2777 txagc_rf, trk_idx);
2778
2779 txagc_bb =
2780 (s8)rtw89_phy_read32_mask(rtwdev,
2781 R_TXAGC_BB + (path << 13),
2782 MASKBYTE2);
2783 txagc_bb_tp =
2784 (s8)rtw89_phy_read32_mask(rtwdev,
2785 R_TXAGC_TP + (path << 13),
2786 B_TXAGC_TP);
2787
2788 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2789 "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
2790 txagc_bb_tp, txagc_bb);
2791
2792 txagc_ofst =
2793 (s8)rtw89_phy_read32_mask(rtwdev,
2794 R_TXAGC_BB + (path << 13),
2795 MASKBYTE3);
2796
2797 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2798 "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
2799 txagc_ofst, delta_ther[path]);
2800
2801 if (rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
2802 BIT(15)) == 0x1)
2803 txagc_ofst = 0;
2804
2805 if (txagc_rf != 0 && cur_ther != 0)
2806 ini_diff = txagc_ofst + delta_ther[path];
2807
2808 if (rtw89_phy_read32_mask(rtwdev, R_P0_TXDPD + (path << 13),
2809 B_P0_TXDPD) == 0x0) {
2810 pwsf[0] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
2811 txagc_bb + ini_diff +
2812 tssi_info->extra_ofst[path];
2813 pwsf[1] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
2814 txagc_bb + ini_diff +
2815 tssi_info->extra_ofst[path];
2816 } else {
2817 pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff +
2818 tssi_info->extra_ofst[path];
2819 pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff +
2820 tssi_info->extra_ofst[path];
2821 }
2822
2823 } else {
2824 pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
2825 pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
2826 }
2827
2828 if (rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS) == 0x0 &&
2829 txagc_rf != 0) {
2830 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2831 "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
2832 pwsf[0], pwsf[1]);
2833
2834 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2835 0x000001FF, pwsf[0]);
2836 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2837 0x01FF0000, pwsf[1]);
2838 }
2839 }
2840 }
2841
2842 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2843 enum rtw89_rf_path path)
2844 {
2845 enum rtw89_band band = rtwdev->hal.current_band_type;
2846
2847 if (band == RTW89_BAND_2G)
2848 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2849 else
2850 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2851 }
2852
2853 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2854 {
2855 enum rtw89_band band = rtwdev->hal.current_band_type;
2856
2857 rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
2858 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2859 &rtw8852a_tssi_sys_defs_2g_tbl,
2860 &rtw8852a_tssi_sys_defs_5g_tbl);
2861 }
2862
2863 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2864 enum rtw89_rf_path path)
2865 {
2866 enum rtw89_band band = rtwdev->hal.current_band_type;
2867
2868 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2869 &rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
2870 &rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl);
2871 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2872 &rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl,
2873 &rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl);
2874 }
2875
2876 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
2877 enum rtw89_phy_idx phy,
2878 enum rtw89_rf_path path)
2879 {
2880 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2881 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
2882 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
2883 }
2884
2885 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2886 enum rtw89_rf_path path)
2887 {
2888 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2889 &rtw8852a_tssi_dck_defs_a_tbl,
2890 &rtw8852a_tssi_dck_defs_b_tbl);
2891 }
2892
2893 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2894 enum rtw89_rf_path path)
2895 {
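/* pack four consecutive s8 thermal offsets into one little-endian u32 word */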
2896 #define __get_val(ptr, idx) \
2897 ({ \
2898 s8 *__ptr = (ptr); \
2899 u8 __idx = (idx), __i, __v; \
2900 u32 __val = 0; \
2901 for (__i = 0; __i < 4; __i++) { \
2902 __v = (__ptr[__idx + __i]); \
2903 __val |= (__v << (8 * __i)); \
2904 } \
2905 __val; \
2906 })
2907 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2908 u8 ch = rtwdev->hal.current_channel;
2909 u8 subband = rtwdev->hal.current_subband;
2910 const s8 *thm_up_a = NULL;
2911 const s8 *thm_down_a = NULL;
2912 const s8 *thm_up_b = NULL;
2913 const s8 *thm_down_b = NULL;
2914 u8 thermal = 0xff;
2915 s8 thm_ofst[64] = {0};
2916 u32 tmp = 0;
2917 u8 i, j;
2918
2919 switch (subband) {
2920 default:
2921 case RTW89_CH_2G:
2922 thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
2923 thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
2924 thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_p;
2925 thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_n;
2926 break;
2927 case RTW89_CH_5G_BAND_1:
2928 thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[0];
2929 thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[0];
2930 thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[0];
2931 thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[0];
2932 break;
2933 case RTW89_CH_5G_BAND_3:
2934 thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[1];
2935 thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[1];
2936 thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[1];
2937 thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[1];
2938 break;
2939 case RTW89_CH_5G_BAND_4:
2940 thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[2];
2941 thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[2];
2942 thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[2];
2943 thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[2];
2944 break;
2945 }
2946
2947 if (path == RF_PATH_A) {
2948 thermal = tssi_info->thermal[RF_PATH_A];
2949
2950 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2951 "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
2952
2953 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
2954 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
2955
2956 if (thermal == 0xff) {
2957 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
2958 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
2959
2960 for (i = 0; i < 64; i += 4) {
2961 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
2962
2963 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2964 "[TSSI] write 0x%x val=0x%08x\n",
2965 0x5c00 + i, 0x0);
2966 }
2967
2968 } else {
2969 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
2970 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
2971 thermal);
2972
2973 i = 0;
2974 for (j = 0; j < 32; j++)
2975 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2976 -thm_down_a[i++] :
2977 -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
2978
2979 i = 1;
2980 for (j = 63; j >= 32; j--)
2981 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2982 thm_up_a[i++] :
2983 thm_up_a[DELTA_SWINGIDX_SIZE - 1];
2984
2985 for (i = 0; i < 64; i += 4) {
2986 tmp = __get_val(thm_ofst, i);
2987 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
2988
2989 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2990 "[TSSI] write 0x%x val=0x%08x\n",
2991 0x5c00 + i, tmp);
2992 }
2993 }
2994 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
2995 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
2996
2997 } else {
2998 thermal = tssi_info->thermal[RF_PATH_B];
2999
3000 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3001 "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
3002
3003 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
3004 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
3005
3006 if (thermal == 0xff) {
3007 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
3008 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
3009
3010 for (i = 0; i < 64; i += 4) {
3011 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
3012
3013 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3014 "[TSSI] write 0x%x val=0x%08x\n",
3015 0x7c00 + i, 0x0);
3016 }
3017
3018 } else {
3019 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
3020 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
3021 thermal);
3022
3023 i = 0;
3024 for (j = 0; j < 32; j++)
3025 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3026 -thm_down_b[i++] :
3027 -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
3028
3029 i = 1;
3030 for (j = 63; j >= 32; j--)
3031 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3032 thm_up_b[i++] :
3033 thm_up_b[DELTA_SWINGIDX_SIZE - 1];
3034
3035 for (i = 0; i < 64; i += 4) {
3036 tmp = __get_val(thm_ofst, i);
3037 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
3038
3039 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3040 "[TSSI] write 0x%x val=0x%08x\n",
3041 0x7c00 + i, tmp);
3042 }
3043 }
3044 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
3045 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
3046 }
3047 #undef __get_val
3048 }
3049
3050 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3051 enum rtw89_rf_path path)
3052 {
3053 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3054 &rtw8852a_tssi_dac_gain_tbl_defs_a_tbl,
3055 &rtw8852a_tssi_dac_gain_tbl_defs_b_tbl);
3056 }
3057
3058 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3059 enum rtw89_rf_path path)
3060 {
3061 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3062 &rtw8852a_tssi_slope_cal_org_defs_a_tbl,
3063 &rtw8852a_tssi_slope_cal_org_defs_b_tbl);
3064 }
3065
3066 static void _tssi_set_rf_gap_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3067 enum rtw89_rf_path path)
3068 {
3069 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3070 &rtw8852a_tssi_rf_gap_tbl_defs_a_tbl,
3071 &rtw8852a_tssi_rf_gap_tbl_defs_b_tbl);
3072 }
3073
3074 static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3075 enum rtw89_rf_path path)
3076 {
3077 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3078 &rtw8852a_tssi_slope_defs_a_tbl,
3079 &rtw8852a_tssi_slope_defs_b_tbl);
3080 }
3081
3082 static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3083 enum rtw89_rf_path path)
3084 {
3085 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3086 &rtw8852a_tssi_track_defs_a_tbl,
3087 &rtw8852a_tssi_track_defs_b_tbl);
3088 }
3089
3090 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
3091 enum rtw89_phy_idx phy,
3092 enum rtw89_rf_path path)
3093 {
3094 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3095 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl,
3096 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl);
3097 }
3098
3099 static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3100 enum rtw89_rf_path path)
3101 {
3102 u8 subband = rtwdev->hal.current_subband;
3103
3104 switch (subband) {
3105 default:
3106 case RTW89_CH_2G:
3107 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3108 &rtw8852a_tssi_pak_defs_a_2g_tbl,
3109 &rtw8852a_tssi_pak_defs_b_2g_tbl);
3110 break;
3111 case RTW89_CH_5G_BAND_1:
3112 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3113 &rtw8852a_tssi_pak_defs_a_5g_1_tbl,
3114 &rtw8852a_tssi_pak_defs_b_5g_1_tbl);
3115 break;
3116 case RTW89_CH_5G_BAND_3:
3117 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3118 &rtw8852a_tssi_pak_defs_a_5g_3_tbl,
3119 &rtw8852a_tssi_pak_defs_b_5g_3_tbl);
3120 break;
3121 case RTW89_CH_5G_BAND_4:
3122 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3123 &rtw8852a_tssi_pak_defs_a_5g_4_tbl,
3124 &rtw8852a_tssi_pak_defs_b_5g_4_tbl);
3125 break;
3126 }
3127 }
3128
3129 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3130 {
3131 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3132 u8 i;
3133
3134 for (i = 0; i < RF_PATH_NUM_8852A; i++) {
3135 _tssi_set_track(rtwdev, phy, i);
3136 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
3137
3138 rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
3139 &rtw8852a_tssi_enable_defs_a_tbl,
3140 &rtw8852a_tssi_enable_defs_b_tbl);
3141
3142 tssi_info->base_thermal[i] =
3143 ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
3144 rtwdev->is_tssi_mode[i] = true;
3145 }
3146 }
3147
3148 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3149 {
3150 rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);
3151
3152 rtwdev->is_tssi_mode[RF_PATH_A] = false;
3153 rtwdev->is_tssi_mode[RF_PATH_B] = false;
3154 }
3155
3156 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3157 {
3158 switch (ch) {
3159 case 1 ... 2:
3160 return 0;
3161 case 3 ... 5:
3162 return 1;
3163 case 6 ... 8:
3164 return 2;
3165 case 9 ... 11:
3166 return 3;
3167 case 12 ... 13:
3168 return 4;
3169 case 14:
3170 return 5;
3171 }
3172
3173 return 0;
3174 }
3175
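/*
 * Channels that fall between two OFDM power groups are tagged with
 * TSSI_EXTRA_GROUP_BIT; the DE for such a channel is the average of the
 * two neighbouring groups (see _tssi_get_ofdm_de()).
 */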
3176 #define TSSI_EXTRA_GROUP_BIT (BIT(31))
3177 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
3178 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
3179 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
3180 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3181
3182 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
3183 {
3184 switch (ch) {
3185 case 1 ... 2:
3186 return 0;
3187 case 3 ... 5:
3188 return 1;
3189 case 6 ... 8:
3190 return 2;
3191 case 9 ... 11:
3192 return 3;
3193 case 12 ... 14:
3194 return 4;
3195 case 36 ... 40:
3196 return 5;
3197 case 41 ... 43:
3198 return TSSI_EXTRA_GROUP(5);
3199 case 44 ... 48:
3200 return 6;
3201 case 49 ... 51:
3202 return TSSI_EXTRA_GROUP(6);
3203 case 52 ... 56:
3204 return 7;
3205 case 57 ... 59:
3206 return TSSI_EXTRA_GROUP(7);
3207 case 60 ... 64:
3208 return 8;
3209 case 100 ... 104:
3210 return 9;
3211 case 105 ... 107:
3212 return TSSI_EXTRA_GROUP(9);
3213 case 108 ... 112:
3214 return 10;
3215 case 113 ... 115:
3216 return TSSI_EXTRA_GROUP(10);
3217 case 116 ... 120:
3218 return 11;
3219 case 121 ... 123:
3220 return TSSI_EXTRA_GROUP(11);
3221 case 124 ... 128:
3222 return 12;
3223 case 129 ... 131:
3224 return TSSI_EXTRA_GROUP(12);
3225 case 132 ... 136:
3226 return 13;
3227 case 137 ... 139:
3228 return TSSI_EXTRA_GROUP(13);
3229 case 140 ... 144:
3230 return 14;
3231 case 149 ... 153:
3232 return 15;
3233 case 154 ... 156:
3234 return TSSI_EXTRA_GROUP(15);
3235 case 157 ... 161:
3236 return 16;
3237 case 162 ... 164:
3238 return TSSI_EXTRA_GROUP(16);
3239 case 165 ... 169:
3240 return 17;
3241 case 170 ... 172:
3242 return TSSI_EXTRA_GROUP(17);
3243 case 173 ... 177:
3244 return 18;
3245 }
3246
3247 return 0;
3248 }
3249
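/* Coarser channel grouping used for the efuse TSSI trim values. */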
3250 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3251 {
3252 switch (ch) {
3253 case 1 ... 8:
3254 return 0;
3255 case 9 ... 14:
3256 return 1;
3257 case 36 ... 48:
3258 return 2;
3259 case 52 ... 64:
3260 return 3;
3261 case 100 ... 112:
3262 return 4;
3263 case 116 ... 128:
3264 return 5;
3265 case 132 ... 144:
3266 return 6;
3267 case 149 ... 177:
3268 return 7;
3269 }
3270
3271 return 0;
3272 }
3273
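/* Look up the OFDM/MCS DE (TSSI offset) for the current channel from
 * tssi_mcs[]; for "extra" groups, return the mean of the two adjacent
 * groups.
 */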
3274 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3275 enum rtw89_rf_path path)
3276 {
3277 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3278 u8 ch = rtwdev->hal.current_channel;
3279 u32 gidx, gidx_1st, gidx_2nd;
3280 s8 de_1st = 0;
3281 s8 de_2nd = 0;
3282 s8 val;
3283
3284 gidx = _tssi_get_ofdm_group(rtwdev, ch);
3285
3286 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3287 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
3288 path, gidx);
3289
3290 if (IS_TSSI_EXTRA_GROUP(gidx)) {
3291 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3292 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3293 de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3294 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3295 val = (de_1st + de_2nd) / 2;
3296
3297 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3298 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3299 path, val, de_1st, de_2nd);
3300 } else {
3301 val = tssi_info->tssi_mcs[path][gidx];
3302
3303 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3304 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3305 }
3306
3307 return val;
3308 }
3309
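/* Same group-based lookup as _tssi_get_ofdm_de(), but against the efuse
 * trim table (tssi_trim[]).
 */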
3310 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
3311 enum rtw89_phy_idx phy,
3312 enum rtw89_rf_path path)
3313 {
3314 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3315 u8 ch = rtwdev->hal.current_channel;
3316 u32 tgidx, tgidx_1st, tgidx_2nd;
3317 s8 tde_1st = 0;
3318 s8 tde_2nd = 0;
3319 s8 val;
3320
3321 tgidx = _tssi_get_trim_group(rtwdev, ch);
3322
3323 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3324 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3325 path, tgidx);
3326
3327 if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3328 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3329 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3330 tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3331 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3332 val = (tde_1st + tde_2nd) / 2;
3333
3334 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3335 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3336 path, val, tde_1st, tde_2nd);
3337 } else {
3338 val = tssi_info->tssi_trim[path][tgidx];
3339
3340 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3341 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3342 path, val);
3343 }
3344
3345 return val;
3346 }
3347
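/* Program the efuse-derived DE plus trim into the per-rate BB DE fields;
 * __DE_MASK selects bits [21:12] of each register. CCK long/short and all
 * OFDM/MCS bandwidth variants are written with the same per-path value.
 */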
3348 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
3349 enum rtw89_phy_idx phy)
3350 {
3351 #define __DE_MASK 0x003ff000
3352 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3353 static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
3354 static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
3355 static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
3356 static const u32 r_mcs_40m[RF_PATH_NUM_8852A] = {0x5840, 0x7840};
3357 static const u32 r_mcs_80m[RF_PATH_NUM_8852A] = {0x5848, 0x7848};
3358 static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
3359 static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
3360 static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
3361 u8 ch = rtwdev->hal.current_channel;
3362 u8 i, gidx;
3363 s8 ofdm_de;
3364 s8 trim_de;
3365 s32 val;
3366
3367 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
3368 phy, ch);
3369
3370 for (i = 0; i < RF_PATH_NUM_8852A; i++) {
3371 gidx = _tssi_get_cck_group(rtwdev, ch);
3372 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
3373 val = tssi_info->tssi_cck[i][gidx] + trim_de;
3374
3375 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3376 "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
3377 i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
3378
3379 rtw89_phy_write32_mask(rtwdev, r_cck_long[i], __DE_MASK, val);
3380 rtw89_phy_write32_mask(rtwdev, r_cck_short[i], __DE_MASK, val);
3381
3382 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3383 "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
3384 r_cck_long[i],
3385 rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
3386 __DE_MASK));
3387
3388 ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
3389 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
3390 val = ofdm_de + trim_de;
3391
3392 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3393 "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
3394 i, ofdm_de, trim_de);
3395
3396 rtw89_phy_write32_mask(rtwdev, r_mcs_20m[i], __DE_MASK, val);
3397 rtw89_phy_write32_mask(rtwdev, r_mcs_40m[i], __DE_MASK, val);
3398 rtw89_phy_write32_mask(rtwdev, r_mcs_80m[i], __DE_MASK, val);
3399 rtw89_phy_write32_mask(rtwdev, r_mcs_80m_80m[i], __DE_MASK, val);
3400 rtw89_phy_write32_mask(rtwdev, r_mcs_5m[i], __DE_MASK, val);
3401 rtw89_phy_write32_mask(rtwdev, r_mcs_10m[i], __DE_MASK, val);
3402
3403 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3404 "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
3405 r_mcs_20m[i],
3406 rtw89_phy_read32_mask(rtwdev, r_mcs_20m[i],
3407 __DE_MASK));
3408 }
3409 #undef __DE_MASK
3410 }
3411
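/* Thermal drift compensation: scale the thermal delta by 1.5, then split
 * the result into an integer part (clamped to [-16, 15], programmed via
 * the DPD offset) and a 3-bit fractional part (applied through the TX
 * gain scale LUT). The exact hardware gain units are not documented here.
 */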
3412 static void _tssi_track(struct rtw89_dev *rtwdev)
3413 {
3414 static const u32 tx_gain_scale_table[] = {
3415 0x400, 0x40e, 0x41d, 0x427, 0x43c, 0x44c, 0x45c, 0x46c,
3416 0x400, 0x39d, 0x3ab, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f1
3417 };
3418 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3419 u8 path;
3420 u8 cur_ther;
3421 s32 delta_ther = 0, gain_offset_int, gain_offset_float;
3422 s8 gain_offset;
3423
3424 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] %s:\n",
3425 __func__);
3426
3427 if (!rtwdev->is_tssi_mode[RF_PATH_A])
3428 return;
3429 if (!rtwdev->is_tssi_mode[RF_PATH_B])
3430 return;
3431
3432 for (path = RF_PATH_A; path < RF_PATH_NUM_8852A; path++) {
3433 if (!tssi_info->tssi_tracking_check[path]) {
			rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] skip!!!\n");
3435 continue;
3436 }
3437
3438 cur_ther = (u8)rtw89_phy_read32_mask(rtwdev,
3439 R_TSSI_THER + (path << 13),
3440 B_TSSI_THER);
3441
3442 if (cur_ther == 0 || tssi_info->base_thermal[path] == 0)
3443 continue;
3444
3445 delta_ther = cur_ther - tssi_info->base_thermal[path];
3446
3447 gain_offset = (s8)delta_ther * 15 / 10;
3448
3449 tssi_info->extra_ofst[path] = gain_offset;
3450
3451 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3452 "[TSSI][TRK] base_thermal=%d gain_offset=0x%x path=%d\n",
3453 tssi_info->base_thermal[path], gain_offset, path);
3454
3455 gain_offset_int = gain_offset >> 3;
3456 gain_offset_float = gain_offset & 7;
3457
3458 if (gain_offset_int > 15)
3459 gain_offset_int = 15;
3460 else if (gain_offset_int < -16)
3461 gain_offset_int = -16;
3462
3463 rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN + (path << 13),
3464 B_DPD_OFT_EN, 0x1);
3465
3466 rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
3467 B_TXGAIN_SCALE_EN, 0x1);
3468
3469 rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_ADDR + (path << 13),
3470 B_DPD_OFT_ADDR, gain_offset_int);
3471
3472 rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
3473 B_TXGAIN_SCALE_OFT,
3474 tx_gain_scale_table[gain_offset_float]);
3475 }
3476 }
3477
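/* Enable the TSSI tracking workaround only for high-power 2 GHz operation;
 * 1800 here appears to be 18 dBm in units of 0.01 dB. Otherwise restore
 * the default tracking table and clear any extra offset.
 */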
3478 static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3479 {
3480 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3481 u8 ch = rtwdev->hal.current_channel, ch_tmp;
3482 u8 bw = rtwdev->hal.current_band_width;
3483 u8 subband = rtwdev->hal.current_subband;
3484 s8 power;
3485 s32 xdbm;
3486
3487 if (bw == RTW89_CHANNEL_WIDTH_40)
3488 ch_tmp = ch - 2;
3489 else if (bw == RTW89_CHANNEL_WIDTH_80)
3490 ch_tmp = ch - 6;
3491 else
3492 ch_tmp = ch;
3493
3494 power = rtw89_phy_read_txpwr_limit(rtwdev, bw, RTW89_1TX,
3495 RTW89_RS_MCS, RTW89_NONBF, ch_tmp);
3496
3497 xdbm = power * 100 / 4;
3498
3499 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d xdbm=%d\n",
3500 __func__, phy, xdbm);
3501
3502 if (xdbm > 1800 && subband == RTW89_CH_2G) {
3503 tssi_info->tssi_tracking_check[RF_PATH_A] = true;
3504 tssi_info->tssi_tracking_check[RF_PATH_B] = true;
3505 } else {
3506 rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_tracking_defs_tbl);
3507 tssi_info->extra_ofst[RF_PATH_A] = 0;
3508 tssi_info->extra_ofst[RF_PATH_B] = 0;
3509 tssi_info->tssi_tracking_check[RF_PATH_A] = false;
3510 tssi_info->tssi_tracking_check[RF_PATH_B] = false;
3511 }
3512 }
3513
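/* Helper for _tssi_pre_tx(): configure PLCP, TX path and power, then start
 * or stop a PMAC packet TX burst.
 */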
3514 static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3515 u8 path, s16 pwr_dbm, u8 enable)
3516 {
3517 rtw8852a_bb_set_plcp_tx(rtwdev);
3518 rtw8852a_bb_cfg_tx_path(rtwdev, path);
3519 rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
3520 rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
3521 }
3522
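/* Fire a short calibration TX burst (with scheduler TX stopped and BTC
 * notified, reusing the DPK RFK type) and snapshot the default TXAGC
 * offsets reported by the BB, retrying past the invalid 0x0/0xc000
 * readings.
 */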
3523 static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3524 {
3525 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3526 const struct rtw89_chip_info *mac_reg = rtwdev->chip;
3527 u8 ch = rtwdev->hal.current_channel, ch_tmp;
3528 u8 bw = rtwdev->hal.current_band_width;
3529 u32 tx_en;
3530 u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
3531 s8 power;
3532 s16 xdbm;
3533 u32 i, tx_counter = 0;
3534
3535 if (bw == RTW89_CHANNEL_WIDTH_40)
3536 ch_tmp = ch - 2;
3537 else if (bw == RTW89_CHANNEL_WIDTH_80)
3538 ch_tmp = ch - 6;
3539 else
3540 ch_tmp = ch;
3541
3542 power = rtw89_phy_read_txpwr_limit(rtwdev, RTW89_CHANNEL_WIDTH_20, RTW89_1TX,
3543 RTW89_RS_OFDM, RTW89_NONBF, ch_tmp);
3544
3545 xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;
3546
3547 if (xdbm > 1800)
3548 xdbm = 68;
3549 else
3550 xdbm = power * 2;
3551
3552 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3553 "[TSSI] %s: phy=%d org_power=%d xdbm=%d\n",
3554 __func__, phy, power, xdbm);
3555
3556 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
3557 rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
3558 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
3559 tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3560
3561 _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
3562 mdelay(15);
3563 _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);
3564
3565 tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
3566 tx_counter;
3567
3568 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0xc000 &&
3569 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0x0) {
3570 for (i = 0; i < 6; i++) {
3571 tssi_info->default_txagc_offset[RF_PATH_A] =
3572 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
3573 MASKBYTE3);
3574
3575 if (tssi_info->default_txagc_offset[RF_PATH_A] != 0x0)
3576 break;
3577 }
3578 }
3579
3580 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0xc000 &&
3581 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0x0) {
3582 for (i = 0; i < 6; i++) {
3583 tssi_info->default_txagc_offset[RF_PATH_B] =
3584 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
3585 MASKBYTE3);
3586
3587 if (tssi_info->default_txagc_offset[RF_PATH_B] != 0x0)
3588 break;
3589 }
3590 }
3591
3592 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3593 "[TSSI] %s: tx counter=%d\n",
3594 __func__, tx_counter);
3595
3596 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3597 "[TSSI] Backup R_TXAGC_BB=0x%x R_TXAGC_BB_S1=0x%x\n",
3598 tssi_info->default_txagc_offset[RF_PATH_A],
3599 tssi_info->default_txagc_offset[RF_PATH_B]);
3600
3601 rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);
3602
3603 rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
3604 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
3605 }
3606
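/* Exported RFK (RF calibration) entry points */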
3607 void rtw8852a_rck(struct rtw89_dev *rtwdev)
3608 {
3609 u8 path;
3610
3611 for (path = 0; path < 2; path++)
3612 _rck(rtwdev, path);
3613 }
3614
3615 void rtw8852a_dack(struct rtw89_dev *rtwdev)
3616 {
3617 u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
3618
3619 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
3620 _dac_cal(rtwdev, false);
3621 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
3622 }
3623
3624 void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3625 {
3626 u32 tx_en;
3627 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3628
3629 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
3630 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3631 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3632
3633 _iqk_init(rtwdev);
3634 if (rtwdev->dbcc_en)
3635 _iqk_dbcc(rtwdev, phy_idx);
3636 else
3637 _iqk(rtwdev, phy_idx, false);
3638
3639 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3640 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
3641 }
3642
3643 void rtw8852a_iqk_track(struct rtw89_dev *rtwdev)
3644 {
3645 _iqk_track(rtwdev);
3646 }
3647
3648 void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
3649 bool is_afe)
3650 {
3651 u32 tx_en;
3652 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3653
3654 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
3655 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3656 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3657
3658 _rx_dck(rtwdev, phy_idx, is_afe);
3659
3660 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3661 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
3662 }
3663
3664 void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
3665 {
3666 u32 tx_en;
3667 u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
3668
3669 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
3670 rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3671 _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3672
3673 rtwdev->dpk.is_dpk_enable = true;
3674 rtwdev->dpk.is_dpk_reload_en = false;
3675 _dpk(rtwdev, phy_idx, false);
3676
3677 rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3678 rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
3679 }
3680
3681 void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
3682 {
3683 _dpk_track(rtwdev);
3684 }
3685
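/* Full TSSI bring-up: disable tracking, program the per-path RF/BB tables,
 * then re-enable and apply efuse DE values, the high-power workaround and
 * a priming TX burst.
 */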
3686 void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3687 {
3688 u8 i;
3689
3690 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
3691 __func__, phy);
3692
3693 _tssi_disable(rtwdev, phy);
3694
3695 for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
3696 _tssi_rf_setting(rtwdev, phy, i);
3697 _tssi_set_sys(rtwdev, phy);
3698 _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
3699 _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
3700 _tssi_set_dck(rtwdev, phy, i);
3701 _tssi_set_tmeter_tbl(rtwdev, phy, i);
3702 _tssi_set_dac_gain_tbl(rtwdev, phy, i);
3703 _tssi_slope_cal_org(rtwdev, phy, i);
3704 _tssi_set_rf_gap_tbl(rtwdev, phy, i);
3705 _tssi_set_slope(rtwdev, phy, i);
3706 _tssi_pak(rtwdev, phy, i);
3707 }
3708
3709 _tssi_enable(rtwdev, phy);
3710 _tssi_set_efuse_to_de(rtwdev, phy);
3711 _tssi_high_power(rtwdev, phy);
3712 _tssi_pre_tx(rtwdev, phy);
3713 }
3714
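/* Lightweight TSSI re-init (a subset of rtw8852a_tssi()) used when the
 * channel changes during scan.
 */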
3715 void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3716 {
3717 u8 i;
3718
3719 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
3720 __func__, phy);
3721
3722 if (!rtwdev->is_tssi_mode[RF_PATH_A])
3723 return;
3724 if (!rtwdev->is_tssi_mode[RF_PATH_B])
3725 return;
3726
3727 _tssi_disable(rtwdev, phy);
3728
3729 for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
3730 _tssi_rf_setting(rtwdev, phy, i);
3731 _tssi_set_sys(rtwdev, phy);
3732 _tssi_set_tmeter_tbl(rtwdev, phy, i);
3733 _tssi_pak(rtwdev, phy, i);
3734 }
3735
3736 _tssi_enable(rtwdev, phy);
3737 _tssi_set_efuse_to_de(rtwdev, phy);
3738 }
3739
3740 void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
3741 {
3742 _tssi_track(rtwdev);
3743 }
3744
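/* Scan configuration: shrink the TSSI averaging and moving-average windows
 * to zero, presumably so power tracking settles within short channel
 * dwells.
 */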
static
void _rtw8852a_tssi_avg_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x0);

	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x0);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}
3763
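/* Normal (non-scan) configuration: averaging window 0x4, moving-average
 * depth 0x2.
 */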
static
void _rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);

	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}
3782
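/* Switch TSSI averaging between the scan and normal configurations. */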
static void rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy, bool enable)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		/* scan start */
		_rtw8852a_tssi_avg_scan(rtwdev, phy);
	} else {
		/* scan end */
		_rtw8852a_tssi_set_avg(rtwdev, phy);
	}
}
3797
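/* On scan start, snapshot the default TXAGC offsets; on scan end, write
 * them back and pulse the OFT_EN bits so the restored offsets take effect.
 */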
3798 static void rtw8852a_tssi_default_txagc(struct rtw89_dev *rtwdev,
3799 enum rtw89_phy_idx phy, bool enable)
3800 {
3801 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3802 u8 i;
3803
3804 if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
3805 return;
3806
3807 if (enable) {
3808 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
3809 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
3810 for (i = 0; i < 6; i++) {
3811 tssi_info->default_txagc_offset[RF_PATH_A] =
3812 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
3813 B_TXAGC_BB);
3814 if (tssi_info->default_txagc_offset[RF_PATH_A])
3815 break;
3816 }
3817 }
3818
3819 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
3820 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
3821 for (i = 0; i < 6; i++) {
3822 tssi_info->default_txagc_offset[RF_PATH_B] =
3823 rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
3824 B_TXAGC_BB_S1);
3825 if (tssi_info->default_txagc_offset[RF_PATH_B])
3826 break;
3827 }
3828 }
3829 } else {
3830 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
3831 tssi_info->default_txagc_offset[RF_PATH_A]);
3832 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
3833 tssi_info->default_txagc_offset[RF_PATH_B]);
3834
3835 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
3836 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
3837
3838 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
3839 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
3840 }
3841 }
3842
3843 void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev,
3844 bool scan_start, enum rtw89_phy_idx phy_idx)
3845 {
3846 if (scan_start) {
3847 rtw8852a_tssi_default_txagc(rtwdev, phy_idx, true);
3848 rtw8852a_tssi_set_avg(rtwdev, phy_idx, true);
3849 } else {
3850 rtw8852a_tssi_default_txagc(rtwdev, phy_idx, false);
3851 rtw8852a_tssi_set_avg(rtwdev, phy_idx, false);
3852 }
3853 }