0001
0002
0003
0004
0005 #include <linux/bcd.h>
0006
0007 #include "main.h"
0008 #include "reg.h"
0009 #include "fw.h"
0010 #include "phy.h"
0011 #include "debug.h"
0012 #include "regd.h"
0013 #include "sar.h"
0014
/* One register write extracted from a PHY parameter table: a register
 * address and the value to program into it.
 */
struct phy_cfg_pair {
	u32 addr;	/* register address */
	u32 data;	/* value to write */
};
0019
/* A PHY table entry viewed either as a condition word (for conditional
 * tables) or as a plain address/data pair.  Both views must have the same
 * size; rtw_parse_tbl_phy_cond() enforces this with a BUILD_BUG_ON.
 */
union phy_table_tile {
	struct rtw_phy_cond cond;
	struct phy_cfg_pair cfg;
};
0024
0025 static const u32 db_invert_table[12][8] = {
0026 {10, 13, 16, 20,
0027 25, 32, 40, 50},
0028 {64, 80, 101, 128,
0029 160, 201, 256, 318},
0030 {401, 505, 635, 800,
0031 1007, 1268, 1596, 2010},
0032 {316, 398, 501, 631,
0033 794, 1000, 1259, 1585},
0034 {1995, 2512, 3162, 3981,
0035 5012, 6310, 7943, 10000},
0036 {12589, 15849, 19953, 25119,
0037 31623, 39811, 50119, 63098},
0038 {79433, 100000, 125893, 158489,
0039 199526, 251189, 316228, 398107},
0040 {501187, 630957, 794328, 1000000,
0041 1258925, 1584893, 1995262, 2511886},
0042 {3162278, 3981072, 5011872, 6309573,
0043 7943282, 1000000, 12589254, 15848932},
0044 {19952623, 25118864, 31622777, 39810717,
0045 50118723, 63095734, 79432823, 100000000},
0046 {125892541, 158489319, 199526232, 251188643,
0047 316227766, 398107171, 501187234, 630957345},
0048 {794328235, 1000000000, 1258925412, 1584893192,
0049 1995262315, 2511886432U, 3162277660U, 3981071706U}
0050 };
0051
/* Per-section rate descriptor lists (CCK, OFDM, HT 1SS/2SS, VHT 1SS/2SS),
 * indexed elsewhere via rtw_rate_section[]/rtw_rate_size[].
 */
u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
u8 rtw_ofdm_rates[] = {
	DESC_RATE6M,  DESC_RATE9M,  DESC_RATE12M,
	DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
	DESC_RATE48M, DESC_RATE54M
};
u8 rtw_ht_1s_rates[] = {
	DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
	DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
	DESC_RATEMCS6, DESC_RATEMCS7
};
u8 rtw_ht_2s_rates[] = {
	DESC_RATEMCS8,  DESC_RATEMCS9,  DESC_RATEMCS10,
	DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
	DESC_RATEMCS14, DESC_RATEMCS15
};
u8 rtw_vht_1s_rates[] = {
	DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
	DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
	DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
	DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
	DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
u8 rtw_vht_2s_rates[] = {
	DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
	DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
	DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
	DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
	DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
/* rate list per section, indexed by enum rtw_rate_section */
u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
	rtw_cck_rates, rtw_ofdm_rates,
	rtw_ht_1s_rates, rtw_ht_2s_rates,
	rtw_vht_1s_rates, rtw_vht_2s_rates
};
EXPORT_SYMBOL(rtw_rate_section);

/* number of rates in each section, parallel to rtw_rate_section[] */
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
	ARRAY_SIZE(rtw_cck_rates),
	ARRAY_SIZE(rtw_ofdm_rates),
	ARRAY_SIZE(rtw_ht_1s_rates),
	ARRAY_SIZE(rtw_ht_2s_rates),
	ARRAY_SIZE(rtw_vht_1s_rates),
	ARRAY_SIZE(rtw_vht_2s_rates)
};
EXPORT_SYMBOL(rtw_rate_size);

static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
0105
/* PHY band selector used when indexing band-specific tables */
enum rtw_phy_band_type {
	PHY_BAND_2G = 0,
	PHY_BAND_5G = 1,
};
0110
0111 static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
0112 {
0113 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
0114 u8 i, j;
0115
0116 for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) {
0117 for (j = 0; j < RTW_RF_PATH_MAX; j++)
0118 dm_info->cck_pd_lv[i][j] = CCK_PD_LV0;
0119 }
0120
0121 dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
0122 }
0123
0124 void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l)
0125 {
0126 struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
0127
0128 rtw_write32_mask(rtwdev,
0129 edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
0130 edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask,
0131 l2h + edcca_th[EDCCA_TH_L2H_IDX].offset);
0132 rtw_write32_mask(rtwdev,
0133 edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
0134 edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask,
0135 h2l + edcca_th[EDCCA_TH_H2L_IDX].offset);
0136 }
0137 EXPORT_SYMBOL(rtw_phy_set_edcca_th);
0138
0139 void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
0140 {
0141 struct rtw_chip_info *chip = rtwdev->chip;
0142 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
0143
0144
0145 if (!rtw_edcca_enabled) {
0146 dm_info->edcca_mode = RTW_EDCCA_NORMAL;
0147 rtw_dbg(rtwdev, RTW_DBG_PHY, "EDCCA disabled, cannot be set\n");
0148 return;
0149 }
0150
0151 switch (rtwdev->regd.dfs_region) {
0152 case NL80211_DFS_ETSI:
0153 dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
0154 dm_info->l2h_th_ini = chip->l2h_th_ini_ad;
0155 break;
0156 case NL80211_DFS_JP:
0157 dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
0158 dm_info->l2h_th_ini = chip->l2h_th_ini_cs;
0159 break;
0160 default:
0161 dm_info->edcca_mode = RTW_EDCCA_NORMAL;
0162 break;
0163 }
0164 }
0165
0166 static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev)
0167 {
0168 struct rtw_chip_info *chip = rtwdev->chip;
0169
0170 rtw_phy_adaptivity_set_mode(rtwdev);
0171 if (chip->ops->adaptivity_init)
0172 chip->ops->adaptivity_init(rtwdev);
0173 }
0174
0175 static void rtw_phy_adaptivity(struct rtw_dev *rtwdev)
0176 {
0177 if (rtwdev->chip->ops->adaptivity)
0178 rtwdev->chip->ops->adaptivity(rtwdev);
0179 }
0180
0181 static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
0182 {
0183 struct rtw_chip_info *chip = rtwdev->chip;
0184
0185 if (chip->ops->cfo_init)
0186 chip->ops->cfo_init(rtwdev);
0187 }
0188
0189 static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev)
0190 {
0191 struct rtw_path_div *path_div = &rtwdev->dm_path_div;
0192
0193 path_div->current_tx_path = rtwdev->chip->default_1ss_tx_path;
0194 path_div->path_a_cnt = 0;
0195 path_div->path_a_sum = 0;
0196 path_div->path_b_cnt = 0;
0197 path_div->path_b_sum = 0;
0198 }
0199
0200 void rtw_phy_init(struct rtw_dev *rtwdev)
0201 {
0202 struct rtw_chip_info *chip = rtwdev->chip;
0203 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
0204 u32 addr, mask;
0205
0206 dm_info->fa_history[3] = 0;
0207 dm_info->fa_history[2] = 0;
0208 dm_info->fa_history[1] = 0;
0209 dm_info->fa_history[0] = 0;
0210 dm_info->igi_bitmap = 0;
0211 dm_info->igi_history[3] = 0;
0212 dm_info->igi_history[2] = 0;
0213 dm_info->igi_history[1] = 0;
0214
0215 addr = chip->dig[0].addr;
0216 mask = chip->dig[0].mask;
0217 dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
0218 rtw_phy_cck_pd_init(rtwdev);
0219
0220 dm_info->iqk.done = false;
0221 rtw_phy_adaptivity_init(rtwdev);
0222 rtw_phy_cfo_init(rtwdev);
0223 rtw_phy_tx_path_div_init(rtwdev);
0224 }
0225 EXPORT_SYMBOL(rtw_phy_init);
0226
0227 void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
0228 {
0229 struct rtw_chip_info *chip = rtwdev->chip;
0230 struct rtw_hal *hal = &rtwdev->hal;
0231 u32 addr, mask;
0232 u8 path;
0233
0234 if (chip->dig_cck) {
0235 const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
0236 rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
0237 }
0238
0239 for (path = 0; path < hal->rf_path_num; path++) {
0240 addr = chip->dig[path].addr;
0241 mask = chip->dig[path].mask;
0242 rtw_write32_mask(rtwdev, addr, mask, igi);
0243 }
0244 }
0245
0246 static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
0247 {
0248 struct rtw_chip_info *chip = rtwdev->chip;
0249
0250 chip->ops->false_alarm_statistics(rtwdev);
0251 }
0252
0253 #define RA_FLOOR_TABLE_SIZE 7
0254 #define RA_FLOOR_UP_GAP 3
0255
0256 static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
0257 {
0258 u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
0259 u8 new_level = 0;
0260 int i;
0261
0262 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
0263 if (i >= old_level)
0264 table[i] += RA_FLOOR_UP_GAP;
0265
0266 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
0267 if (rssi < table[i]) {
0268 new_level = i;
0269 break;
0270 }
0271 }
0272
0273 return new_level;
0274 }
0275
/* Context passed through the station iterator while gathering RSSI stats */
struct rtw_phy_stat_iter_data {
	struct rtw_dev *rtwdev;
	u8 min_rssi;	/* running minimum over all iterated stations */
};
0280
/* Per-station iterator: refresh the station's RSSI level from its RSSI
 * moving average, report it to firmware, and track the minimum RSSI.
 */
static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_phy_stat_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 rssi;

	rssi = ewma_rssi_read(&si->avg_rssi);
	si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);

	rtw_fw_send_rssi_info(rtwdev, si);

	iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
}
0295
0296 static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
0297 {
0298 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
0299 struct rtw_phy_stat_iter_data data = {};
0300
0301 data.rtwdev = rtwdev;
0302 data.min_rssi = U8_MAX;
0303 rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);
0304
0305 dm_info->pre_min_rssi = dm_info->min_rssi;
0306 dm_info->min_rssi = data.min_rssi;
0307 }
0308
0309 static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev)
0310 {
0311 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
0312
0313 dm_info->last_pkt_count = dm_info->cur_pkt_count;
0314 memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count));
0315 }
0316
/* Gather the periodic PHY statistics: station RSSI, false alarms, and
 * per-rate packet counts.
 */
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
	rtw_phy_stat_rate_cnt(rtwdev);
}
0323
0324 #define DIG_PERF_FA_TH_LOW 250
0325 #define DIG_PERF_FA_TH_HIGH 500
0326 #define DIG_PERF_FA_TH_EXTRA_HIGH 750
0327 #define DIG_PERF_MAX 0x5a
0328 #define DIG_PERF_MID 0x40
0329 #define DIG_CVRG_FA_TH_LOW 2000
0330 #define DIG_CVRG_FA_TH_HIGH 4000
0331 #define DIG_CVRG_FA_TH_EXTRA_HIGH 5000
0332 #define DIG_CVRG_MAX 0x2a
0333 #define DIG_CVRG_MID 0x26
0334 #define DIG_CVRG_MIN 0x1c
0335 #define DIG_RSSI_GAIN_OFFSET 15
0336
/* Detect IGI oscillation ("damping"): when recent IGI moves and false-alarm
 * counts show an up/down/up pattern, freeze DIG for a while (up to ~20
 * rounds or until min RSSI drifts more than 3 dB) to avoid thrashing.
 * Returns true while DIG should be held.
 */
static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
	u16 fa_lo = DIG_PERF_FA_TH_LOW;
	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
	u16 *fa_history;
	u8 *igi_history;
	u8 damping_rssi;
	u8 min_rssi;
	u8 diff;
	u8 igi_bitmap;
	bool damping = false;

	min_rssi = dm_info->min_rssi;
	if (dm_info->damping) {
		/* already damping: release once RSSI moved away or timeout */
		damping_rssi = dm_info->damping_rssi;
		diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
						 damping_rssi - min_rssi;
		if (diff > 3 || dm_info->damping_cnt++ > 20) {
			dm_info->damping = false;
			return false;
		}

		return true;
	}

	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;
	igi_bitmap = dm_info->igi_bitmap & 0xf;
	switch (igi_bitmap) {
	case 5:
		/* up, down, up, down (0b0101): IGI bouncing every round,
		 * with FA alternating above fa_hi / below fa_lo
		 */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[2] > igi_history[3] &&
		    igi_history[0] - igi_history[1] >= 2 &&
		    igi_history[2] - igi_history[3] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
			damping = true;
		break;
	case 9:
		/* up, down, down, up (0b1001): slower two-round bounce */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[3] > igi_history[2] &&
		    igi_history[0] - igi_history[1] >= 4 &&
		    igi_history[3] - igi_history[2] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
			damping = true;
		break;
	default:
		return false;
	}

	if (damping) {
		dm_info->damping = true;
		dm_info->damping_cnt = 0;
		dm_info->damping_rssi = min_rssi;
	}

	return damping;
}
0399
/* Compute the IGI clamp range.  When linked, use the performance limits
 * and bound by min RSSI; otherwise use the coverage limits.
 */
static void rtw_phy_dig_get_boundary(struct rtw_dev *rtwdev,
				     struct rtw_dm_info *dm_info,
				     u8 *upper, u8 *lower, bool linked)
{
	u8 dig_max, dig_min, dig_mid;
	u8 min_rssi;

	if (linked) {
		dig_max = DIG_PERF_MAX;
		dig_mid = DIG_PERF_MID;
		dig_min = rtwdev->chip->dig_min;
		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
	} else {
		dig_max = DIG_CVRG_MAX;
		dig_mid = DIG_CVRG_MID;
		dig_min = DIG_CVRG_MIN;
		min_rssi = dig_min;
	}

	/* keep the upper bound within one gain offset of the weakest peer */
	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);

	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}
0425
0426 static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
0427 u16 *fa_th, u8 *step, bool linked)
0428 {
0429 u8 min_rssi, pre_min_rssi;
0430
0431 min_rssi = dm_info->min_rssi;
0432 pre_min_rssi = dm_info->pre_min_rssi;
0433 step[0] = 4;
0434 step[1] = 3;
0435 step[2] = 2;
0436
0437 if (linked) {
0438 fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
0439 fa_th[1] = DIG_PERF_FA_TH_HIGH;
0440 fa_th[2] = DIG_PERF_FA_TH_LOW;
0441 if (pre_min_rssi > min_rssi) {
0442 step[0] = 6;
0443 step[1] = 4;
0444 step[2] = 2;
0445 }
0446 } else {
0447 fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
0448 fa_th[1] = DIG_CVRG_FA_TH_HIGH;
0449 fa_th[2] = DIG_CVRG_FA_TH_LOW;
0450 }
0451 }
0452
0453 static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
0454 {
0455 u8 *igi_history;
0456 u16 *fa_history;
0457 u8 igi_bitmap;
0458 bool up;
0459
0460 igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
0461 igi_history = dm_info->igi_history;
0462 fa_history = dm_info->fa_history;
0463
0464 up = igi > igi_history[0];
0465 igi_bitmap |= up;
0466
0467 igi_history[3] = igi_history[2];
0468 igi_history[2] = igi_history[1];
0469 igi_history[1] = igi_history[0];
0470 igi_history[0] = igi;
0471
0472 fa_history[3] = fa_history[2];
0473 fa_history[2] = fa_history[1];
0474 fa_history[1] = fa_history[0];
0475 fa_history[0] = fa;
0476
0477 dm_info->igi_bitmap = igi_bitmap;
0478 }
0479
/* Dynamic IGI (initial gain) control: raise the gain index when false
 * alarms exceed the thresholds, otherwise let it drift down; then clamp
 * it into the allowed range and program it only if it changed.
 */
static void rtw_phy_dig(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 upper_bound, lower_bound;
	u8 pre_igi, cur_igi;
	u16 fa_th[3], fa_cnt;
	u8 level;
	u8 step[3];
	bool linked;

	if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags))
		return;

	/* hold the IGI while the damping detector is active */
	if (rtw_phy_dig_check_damping(dm_info))
		return;

	linked = !!rtwdev->sta_cnt;

	fa_cnt = dm_info->total_fa_cnt;
	pre_igi = dm_info->igi_history[0];

	rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);

	/* add the step matching the highest false-alarm threshold that was
	 * exceeded, then always subtract 2 so the IGI can fall back when
	 * false alarms stay low
	 */
	cur_igi = pre_igi;
	for (level = 0; level < 3; level++) {
		if (fa_cnt > fa_th[level]) {
			cur_igi += step[level];
			break;
		}
	}
	cur_igi -= 2;

	/* clamp into the bounds derived from the minimum station RSSI */
	rtw_phy_dig_get_boundary(rtwdev, dm_info, &upper_bound, &lower_bound,
				 linked);
	cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);

	/* record this round for the damping detector */
	rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);

	if (cur_igi != pre_igi)
		rtw_phy_dig_write(rtwdev, cur_igi);
}
0533
0534 static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
0535 {
0536 struct rtw_dev *rtwdev = data;
0537 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
0538
0539 rtw_update_sta_info(rtwdev, si, false);
0540 }
0541
0542 static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
0543 {
0544 if (rtwdev->watch_dog_cnt & 0x3)
0545 return;
0546
0547 rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
0548 }
0549
/* Build an RRSR (response rate set) bitmask for @rate_idx: reduce the rate
 * to its index within its own rate section, then return a mask covering
 * all rate bits from 1M up through that index.
 */
static u32 rtw_phy_get_rrsr_mask(struct rtw_dev *rtwdev, u8 rate_idx)
{
	u8 rate_order;

	rate_order = rate_idx;

	/* subtract the base rate of whichever section rate_idx belongs to */
	if (rate_idx >= DESC_RATEVHT4SS_MCS0)
		rate_order -= DESC_RATEVHT4SS_MCS0;
	else if (rate_idx >= DESC_RATEVHT3SS_MCS0)
		rate_order -= DESC_RATEVHT3SS_MCS0;
	else if (rate_idx >= DESC_RATEVHT2SS_MCS0)
		rate_order -= DESC_RATEVHT2SS_MCS0;
	else if (rate_idx >= DESC_RATEVHT1SS_MCS0)
		rate_order -= DESC_RATEVHT1SS_MCS0;
	else if (rate_idx >= DESC_RATEMCS24)
		rate_order -= DESC_RATEMCS24;
	else if (rate_idx >= DESC_RATEMCS16)
		rate_order -= DESC_RATEMCS16;
	else if (rate_idx >= DESC_RATEMCS8)
		rate_order -= DESC_RATEMCS8;
	else if (rate_idx >= DESC_RATEMCS0)
		rate_order -= DESC_RATEMCS0;
	else if (rate_idx >= DESC_RATE6M)
		rate_order -= DESC_RATE6M;
	else
		rate_order -= DESC_RATE1M;

	/* bump the order for HT/VHT rates, and for the first rate of a
	 * legacy section, so the mask spans at least one bit past the CCK
	 * block -- NOTE(review): assumed from the mask arithmetic below;
	 * confirm against the RRSR register layout
	 */
	if (rate_idx >= DESC_RATEMCS0 || rate_order == 0)
		rate_order++;

	return GENMASK(rate_order + RRSR_RATE_ORDER_CCK_LEN - 1, 0);
}
0582
0583 static void rtw_phy_rrsr_mask_min_iter(void *data, struct ieee80211_sta *sta)
0584 {
0585 struct rtw_dev *rtwdev = (struct rtw_dev *)data;
0586 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
0587 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
0588 u32 mask = 0;
0589
0590 mask = rtw_phy_get_rrsr_mask(rtwdev, si->ra_report.desc_rate);
0591 if (mask < dm_info->rrsr_mask_min)
0592 dm_info->rrsr_mask_min = mask;
0593 }
0594
/* Recompute the minimum RRSR mask over all stations and program REG_RRSR
 * with the initial RRSR value restricted to that mask.
 */
static void rtw_phy_rrsr_update(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	dm_info->rrsr_mask_min = RRSR_RATE_ORDER_MAX;
	rtw_iterate_stas_atomic(rtwdev, rtw_phy_rrsr_mask_min_iter, rtwdev);
	rtw_write32(rtwdev, REG_RRSR, dm_info->rrsr_val_init & dm_info->rrsr_mask_min);
}
0603
0604 static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
0605 {
0606 struct rtw_chip_info *chip = rtwdev->chip;
0607
0608 if (chip->ops->dpk_track)
0609 chip->ops->dpk_track(rtwdev);
0610 }
0611
/* Context passed through the vif iterator when matching a received frame's
 * BSSID against our interfaces (used for CFO accumulation).
 */
struct rtw_rx_addr_match_data {
	struct rtw_dev *rtwdev;
	struct ieee80211_hdr *hdr;
	struct rtw_rx_pkt_stat *pkt_stat;
	u8 *bssid;	/* BSSID extracted from the received header */
};
0618
0619 static void rtw_phy_parsing_cfo_iter(void *data, u8 *mac,
0620 struct ieee80211_vif *vif)
0621 {
0622 struct rtw_rx_addr_match_data *iter_data = data;
0623 struct rtw_dev *rtwdev = iter_data->rtwdev;
0624 struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
0625 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
0626 struct rtw_cfo_track *cfo = &dm_info->cfo_track;
0627 u8 *bssid = iter_data->bssid;
0628 u8 i;
0629
0630 if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
0631 return;
0632
0633 for (i = 0; i < rtwdev->hal.rf_path_num; i++) {
0634 cfo->cfo_tail[i] += pkt_stat->cfo_tail[i];
0635 cfo->cfo_cnt[i]++;
0636 }
0637
0638 cfo->packet_count++;
0639 }
0640
0641 void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
0642 struct rtw_rx_pkt_stat *pkt_stat)
0643 {
0644 struct ieee80211_hdr *hdr = pkt_stat->hdr;
0645 struct rtw_rx_addr_match_data data = {};
0646
0647 if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status ||
0648 ieee80211_is_ctl(hdr->frame_control))
0649 return;
0650
0651 data.rtwdev = rtwdev;
0652 data.hdr = hdr;
0653 data.pkt_stat = pkt_stat;
0654 data.bssid = get_hdr_bssid(hdr);
0655
0656 rtw_iterate_vifs_atomic(rtwdev, rtw_phy_parsing_cfo_iter, &data);
0657 }
0658 EXPORT_SYMBOL(rtw_phy_parsing_cfo);
0659
0660 static void rtw_phy_cfo_track(struct rtw_dev *rtwdev)
0661 {
0662 struct rtw_chip_info *chip = rtwdev->chip;
0663
0664 if (chip->ops->cfo_track)
0665 chip->ops->cfo_track(rtwdev);
0666 }
0667
0668 #define CCK_PD_FA_LV1_MIN 1000
0669 #define CCK_PD_FA_LV0_MAX 500
0670
0671 static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev)
0672 {
0673 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
0674 u32 cck_fa_avg = dm_info->cck_fa_avg;
0675
0676 if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
0677 return CCK_PD_LV1;
0678
0679 if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
0680 return CCK_PD_LV0;
0681
0682 return CCK_PD_LV_MAX;
0683 }
0684
0685 #define CCK_PD_IGI_LV4_VAL 0x38
0686 #define CCK_PD_IGI_LV3_VAL 0x2a
0687 #define CCK_PD_IGI_LV2_VAL 0x24
0688 #define CCK_PD_RSSI_LV4_VAL 32
0689 #define CCK_PD_RSSI_LV3_VAL 32
0690 #define CCK_PD_RSSI_LV2_VAL 24
0691
/* Pick the CCK PD level while associated, from current IGI, minimum RSSI,
 * and the averaged CCK false alarms; CCK_PD_LV_MAX means "keep the
 * current level".
 */
static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 igi = dm_info->igi_history[0];
	u8 rssi = dm_info->min_rssi;
	u32 cck_fa_avg = dm_info->cck_fa_avg;

	if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL)
		return CCK_PD_LV4;
	if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL)
		return CCK_PD_LV3;
	/* LV2 needs only one of the two conditions */
	if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL)
		return CCK_PD_LV2;
	if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
		return CCK_PD_LV1;
	if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
		return CCK_PD_LV0;

	return CCK_PD_LV_MAX;
}
0712
0713 static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev)
0714 {
0715 if (!rtw_is_assoc(rtwdev))
0716 return rtw_phy_cck_pd_lv_unlink(rtwdev);
0717 else
0718 return rtw_phy_cck_pd_lv_link(rtwdev);
0719 }
0720
/* Periodic CCK packet-detection tuning (2.4 GHz only): update the running
 * CCK false-alarm average, pick a PD level, and program it via the chip op.
 */
static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 cck_fa = dm_info->cck_fa_cnt;
	u8 level;

	if (rtwdev->hal.current_band_type != RTW_BAND_2G)
		return;

	/* exponential moving average with weight 3/4 on the old value */
	if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET)
		dm_info->cck_fa_avg = cck_fa;
	else
		dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2;

	/* NOTE(review): the cck_fa field below prints fa_history[0] --
	 * confirm whether cck_fa_cnt was intended
	 */
	rtw_dbg(rtwdev, RTW_DBG_PHY, "IGI=0x%x, rssi_min=%d, cck_fa=%d\n",
		dm_info->igi_history[0], dm_info->min_rssi,
		dm_info->fa_history[0]);
	rtw_dbg(rtwdev, RTW_DBG_PHY, "cck_fa_avg=%d, cck_pd_default=%d\n",
		dm_info->cck_fa_avg, dm_info->cck_pd_default);

	level = rtw_phy_cck_pd_lv(rtwdev);

	/* CCK_PD_LV_MAX means "no change requested" */
	if (level >= CCK_PD_LV_MAX)
		return;

	if (chip->ops->cck_pd_set)
		chip->ops->cck_pd_set(rtwdev, level);
}
0750
0751 static void rtw_phy_pwr_track(struct rtw_dev *rtwdev)
0752 {
0753 rtwdev->chip->ops->pwr_track(rtwdev);
0754 }
0755
/* Periodic rate-adaptation work: push PHY info to firmware, refresh
 * per-station RA info, and update the response rate set.
 */
static void rtw_phy_ra_track(struct rtw_dev *rtwdev)
{
	rtw_fw_update_wl_phy_info(rtwdev);
	rtw_phy_ra_info_update(rtwdev);
	rtw_phy_rrsr_update(rtwdev);
}
0762
/* Watchdog entry point for all periodic PHY dynamic mechanisms.  When the
 * firmware advertises adaptivity support the firmware handler is used,
 * otherwise the driver-side handler runs.
 */
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
	/* order matters: statistics feed DIG, CCK PD and RA below */
	rtw_phy_statistics(rtwdev);
	rtw_phy_dig(rtwdev);
	rtw_phy_cck_pd(rtwdev);
	rtw_phy_ra_track(rtwdev);
	rtw_phy_tx_path_diversity(rtwdev);
	rtw_phy_cfo_track(rtwdev);
	rtw_phy_dpk_track(rtwdev);
	rtw_phy_pwr_track(rtwdev);

	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_ADAPTIVITY))
		rtw_fw_adaptivity(rtwdev);
	else
		rtw_phy_adaptivity(rtwdev);
}
0780
0781 #define FRAC_BITS 3
0782
0783 static u8 rtw_phy_power_2_db(s8 power)
0784 {
0785 if (power <= -100 || power >= 20)
0786 return 0;
0787 else if (power >= 0)
0788 return 100;
0789 else
0790 return 100 + power;
0791 }
0792
0793 static u64 rtw_phy_db_2_linear(u8 power_db)
0794 {
0795 u8 i, j;
0796 u64 linear;
0797
0798 if (power_db > 96)
0799 power_db = 96;
0800 else if (power_db < 1)
0801 return 1;
0802
0803
0804 i = (power_db - 1) >> 3;
0805 j = (power_db - 1) - (i << 3);
0806
0807 linear = db_invert_table[i][j];
0808 linear = i > 2 ? linear << FRAC_BITS : linear;
0809
0810 return linear;
0811 }
0812
/* Inverse of rtw_phy_db_2_linear(): find the dB value (1..96) whose table
 * entry is nearest to @linear.  The first three table rows are stored
 * pre-scaled by 2^FRAC_BITS, so comparisons against them scale @linear up.
 */
static u8 rtw_phy_linear_2_db(u64 linear)
{
	u8 i;
	u8 j;
	u32 dB;

	if (linear >= db_invert_table[11][7])
		return 96; /* above the table: saturate at 96 dB */

	/* find the first row whose last entry is >= linear */
	for (i = 0; i < 12; i++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
			break;
		else if (i > 2 && linear <= db_invert_table[i][7])
			break;
	}

	/* find the first column in that row that is >= linear */
	for (j = 0; j < 8; j++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
			break;
		else if (i > 2 && linear <= db_invert_table[i][j])
			break;
	}

	if (j == 0 && i == 0)
		goto end;

	/* round to the nearer neighbour; row 3 restarts the scaled range,
	 * so stepping back from it crosses into the pre-scaled row 2
	 */
	if (j == 0) {
		if (i != 3) {
			if (db_invert_table[i][0] - linear >
			    linear - db_invert_table[i - 1][7]) {
				i = i - 1;
				j = 7;
			}
		} else {
			if (db_invert_table[3][0] - linear >
			    linear - db_invert_table[2][7]) {
				i = 2;
				j = 7;
			}
		}
	} else {
		if (db_invert_table[i][j] - linear >
		    linear - db_invert_table[i][j - 1]) {
			j = j - 1;
		}
	}
end:
	dB = (i << 3) + j + 1; /* table position back to dB */

	return dB;
}
0864
/* Combine per-path RF power reports into one RSSI: convert each path to
 * linear power, average over the number of paths, and convert back to dB.
 */
u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
{
	s8 power;
	u8 power_db;
	u64 linear;
	u64 sum = 0;
	u8 path;

	for (path = 0; path < path_num; path++) {
		power = rf_power[path];
		power_db = rtw_phy_power_2_db(power);
		linear = rtw_phy_db_2_linear(power_db);
		sum += linear;
	}

	/* drop the fixed-point fraction with rounding */
	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
	switch (path_num) {
	case 2:
		sum >>= 1;
	break;
	case 3:
		/* multiply by 11/32 (~1/2.9) as a shift-only divide by 3 */
		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
		break;
	case 4:
		sum >>= 2;
		break;
	default:
		break;
	}

	return rtw_phy_linear_2_db(sum);
}
EXPORT_SYMBOL(rtw_phy_rf_power_2_rssi);
0898
0899 u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
0900 u32 addr, u32 mask)
0901 {
0902 struct rtw_hal *hal = &rtwdev->hal;
0903 struct rtw_chip_info *chip = rtwdev->chip;
0904 const u32 *base_addr = chip->rf_base_addr;
0905 u32 val, direct_addr;
0906
0907 if (rf_path >= hal->rf_phy_num) {
0908 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
0909 return INV_RF_DATA;
0910 }
0911
0912 addr &= 0xff;
0913 direct_addr = base_addr[rf_path] + (addr << 2);
0914 mask &= RFREG_MASK;
0915
0916 val = rtw_read32_mask(rtwdev, direct_addr, mask);
0917
0918 return val;
0919 }
0920 EXPORT_SYMBOL(rtw_phy_read_rf);
0921
/* Read an RF register through the serial (LSSI/HSSI) interface: latch the
 * address, strobe the read edge on path A, wait for completion, then read
 * the data register picked by the PI-enable bit.  The sequence of register
 * writes below is order-sensitive.
 */
u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			 u32 addr, u32 mask)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rf_sipi_addr *rf_sipi_addr;
	const struct rtw_rf_sipi_addr *rf_sipi_addr_a;
	u32 val32;
	u32 en_pi;
	u32 r_addr;
	u32 shift;

	if (rf_path >= hal->rf_phy_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (!chip->rf_sipi_read_addr) {
		rtw_err(rtwdev, "rf_sipi_read_addr isn't defined\n");
		return INV_RF_DATA;
	}

	rf_sipi_addr = &chip->rf_sipi_read_addr[rf_path];
	rf_sipi_addr_a = &chip->rf_sipi_read_addr[RF_PATH_A];

	addr &= 0xff;

	/* place the RF register address into the read-address field */
	val32 = rtw_read32(rtwdev, rf_sipi_addr->hssi_2);
	val32 = (val32 & ~LSSI_READ_ADDR_MASK) | (addr << 23);
	rtw_write32(rtwdev, rf_sipi_addr->hssi_2, val32);

	/* toggle the read edge (low then high) on path A to start the read */
	val32 = rtw_read32(rtwdev, rf_sipi_addr_a->hssi_2);
	rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 & ~LSSI_READ_EDGE_MASK);
	rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 | LSSI_READ_EDGE_MASK);

	udelay(120);

	/* PI mode selects a different read-back register */
	en_pi = rtw_read32_mask(rtwdev, rf_sipi_addr->hssi_1, BIT(8));
	r_addr = en_pi ? rf_sipi_addr->lssi_read_pi : rf_sipi_addr->lssi_read;

	val32 = rtw_read32_mask(rtwdev, r_addr, LSSI_READ_DATA_MASK);

	shift = __ffs(mask);

	return (val32 & mask) >> shift;
}
EXPORT_SYMBOL(rtw_phy_read_rf_sipi);
0970
/* Write an RF register through the serial interface.  Partial-field writes
 * read the current value first and merge the new bits under @mask; the
 * address and 20-bit data are packed into one word for the SIPI register.
 * Returns false on invalid path or failed read-back.
 */
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			       u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 *sipi_addr = chip->rf_sipi_addr;
	u32 data_and_addr;
	u32 old_data = 0;
	u32 shift;

	if (rf_path >= hal->rf_phy_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		/* read-modify-write for a partial field */
		old_data = chip->ops->read_rf(rtwdev, rf_path, addr, RFREG_MASK);

		if (old_data == INV_RF_DATA) {
			rtw_err(rtwdev, "Write fail, rf is disabled\n");
			return false;
		}

		shift = __ffs(mask);
		data = ((old_data) & (~mask)) | (data << shift);
	}

	/* [27:20] register address, [19:0] data */
	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;

	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);

	udelay(13);

	return true;
}
EXPORT_SYMBOL(rtw_phy_write_rf_reg_sipi);
1009 EXPORT_SYMBOL(rtw_phy_write_rf_reg_sipi);
1010
1011 bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
1012 u32 addr, u32 mask, u32 data)
1013 {
1014 struct rtw_hal *hal = &rtwdev->hal;
1015 struct rtw_chip_info *chip = rtwdev->chip;
1016 const u32 *base_addr = chip->rf_base_addr;
1017 u32 direct_addr;
1018
1019 if (rf_path >= hal->rf_phy_num) {
1020 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1021 return false;
1022 }
1023
1024 addr &= 0xff;
1025 direct_addr = base_addr[rf_path] + (addr << 2);
1026 mask &= RFREG_MASK;
1027
1028 rtw_write32_mask(rtwdev, direct_addr, mask, data);
1029
1030 udelay(1);
1031
1032 return true;
1033 }
1034
1035 bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
1036 u32 addr, u32 mask, u32 data)
1037 {
1038 if (addr != 0x00)
1039 return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);
1040
1041 return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
1042 }
1043 EXPORT_SYMBOL(rtw_phy_write_rf_reg_mix);
1044
/* Build the driver-side PHY condition word used to match conditional
 * parameter tables: cut version, package, platform, RFE option and host
 * interface.  Zero cut/pkg are encoded as 15 (wildcard slot).
 */
void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct rtw_phy_cond cond = {0};

	cond.cut = hal->cut_version ? hal->cut_version : 15;
	cond.pkg = pkg ? pkg : 15;
	cond.plat = 0x04;
	cond.rfe = efuse->rfe_option;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_USB:
		cond.intf = INTF_USB;
		break;
	case RTW_HCI_TYPE_SDIO:
		cond.intf = INTF_SDIO;
		break;
	case RTW_HCI_TYPE_PCIE:
	default:
		cond.intf = INTF_PCIE;
		break;
	}

	hal->phy_cond = cond;

	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
}
1073
1074 static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
1075 {
1076 struct rtw_hal *hal = &rtwdev->hal;
1077 struct rtw_phy_cond drv_cond = hal->phy_cond;
1078
1079 if (cond.cut && cond.cut != drv_cond.cut)
1080 return false;
1081
1082 if (cond.pkg && cond.pkg != drv_cond.pkg)
1083 return false;
1084
1085 if (cond.intf && cond.intf != drv_cond.intf)
1086 return false;
1087
1088 if (cond.rfe != drv_cond.rfe)
1089 return false;
1090
1091 return true;
1092 }
1093
/* Walk a conditional PHY table and apply matching register writes.  The
 * table interleaves condition words (IF/ELIF/ELSE/ENDIF branches) with
 * address/data pairs; do_cfg() is invoked only for pairs inside a branch
 * whose condition matched.  is_skipped remembers that an earlier branch
 * in the current IF/ENDIF group already matched.
 */
void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const union phy_table_tile *p = tbl->data;
	const union phy_table_tile *end = p + tbl->size / 2;
	struct rtw_phy_cond pos_cond = {0};
	bool is_matched = true, is_skipped = false;

	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));

	for (; p < end; p++) {
		if (p->cond.pos) {
			/* branch-opening condition word */
			switch (p->cond.branch) {
			case BRANCH_ENDIF:
				is_matched = true;
				is_skipped = false;
				break;
			case BRANCH_ELSE:
				is_matched = is_skipped ? false : true;
				break;
			case BRANCH_IF:
			case BRANCH_ELIF:
			default:
				/* evaluated when the matching neg word
				 * arrives
				 */
				pos_cond = p->cond;
				break;
			}
		} else if (p->cond.neg) {
			/* closes the pending condition: evaluate it */
			if (!is_skipped) {
				if (check_positive(rtwdev, pos_cond)) {
					is_matched = true;
					is_skipped = true;
				} else {
					is_matched = false;
					is_skipped = false;
				}
			} else {
				is_matched = false;
			}
		} else if (is_matched) {
			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
		}
	}
}
EXPORT_SYMBOL(rtw_parse_tbl_phy_cond);
1137
1138 #define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
1139
1140 static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
1141 {
1142 if (rtwdev->chip->is_pwr_by_rate_dec)
1143 return bcd_to_dec_pwr_by_rate(hex, i);
1144
1145 return (hex >> (i * 8)) & 0xFF;
1146 }
1147
/* Decode one BB-PG "power by rate" register write into the rates it
 * configures and their per-rate power values.
 *
 * @addr:        BB register address from the table (selects the rate group)
 * @mask:        bitmask of the write (only consulted for the split 0x86C reg)
 * @val:         32-bit register value; each byte is one rate's power
 * @rate:        out: DESC_RATE* indices covered by this register
 * @pwr_by_rate: out: decoded power value per rate (parallel to @rate)
 * @rate_num:    out: number of valid entries written (0 on unknown addr)
 *
 * Address ranges: 0xE00/0x8xx are the legacy 1-path layout; 0xC20+/0xE20+/
 * 0x1820+/0x1A20+ are the per-path A/B/C/D layout — TODO confirm mapping
 * against the chip programming guide.
 */
static void
rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
					 u32 addr, u32 mask, u32 val, u8 *rate,
					 u8 *pwr_by_rate, u8 *rate_num)
{
	int i;

	switch (addr) {
	case 0xE00:
	case 0x830:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE04:
	case 0x834:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE08:
		/* CCK 1M lives in byte 1 of this register.
		 * NOTE(review): uses bcd_to_dec_pwr_by_rate() directly,
		 * bypassing the is_pwr_by_rate_dec check other cases go
		 * through — confirm this is intentional for these chips.
		 */
		rate[0] = DESC_RATE1M;
		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
		*rate_num = 1;
		break;
	case 0x86C:
		/* split register: upper three bytes are CCK 2M/5.5M/11M,
		 * the low byte (written separately) is 11M again
		 */
		if (mask == 0xffffff00) {
			rate[0] = DESC_RATE2M;
			rate[1] = DESC_RATE5_5M;
			rate[2] = DESC_RATE11M;
			for (i = 1; i < 4; ++i)
				pwr_by_rate[i - 1] =
					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
			*rate_num = 3;
		} else if (mask == 0x000000ff) {
			rate[0] = DESC_RATE11M;
			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
			*rate_num = 1;
		}
		break;
	case 0xE10:
	case 0x83C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE14:
	case 0x848:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE18:
	case 0x84C:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xE1C:
	case 0x868:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0x838:
		/* CCK 1M/2M/5.5M in bytes 1..3; byte 0 unused here */
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		for (i = 1; i < 4; ++i)
			pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
								    val, i);
		*rate_num = 3;
		break;
	/* per-path layout below: 0xC20=path A, 0xE20=B, 0x1820=C, 0x1A20=D */
	case 0xC20:
	case 0xE20:
	case 0x1820:
	case 0x1A20:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		rate[3] = DESC_RATE11M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC24:
	case 0xE24:
	case 0x1824:
	case 0x1A24:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC28:
	case 0xE28:
	case 0x1828:
	case 0x1A28:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC2C:
	case 0xE2C:
	case 0x182C:
	case 0x1A2C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC30:
	case 0xE30:
	case 0x1830:
	case 0x1A30:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC34:
	case 0xE34:
	case 0x1834:
	case 0x1A34:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC38:
	case 0xE38:
	case 0x1838:
	case 0x1A38:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC3C:
	case 0xE3C:
	case 0x183C:
	case 0x1A3C:
		rate[0] = DESC_RATEVHT1SS_MCS0;
		rate[1] = DESC_RATEVHT1SS_MCS1;
		rate[2] = DESC_RATEVHT1SS_MCS2;
		rate[3] = DESC_RATEVHT1SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC40:
	case 0xE40:
	case 0x1840:
	case 0x1A40:
		rate[0] = DESC_RATEVHT1SS_MCS4;
		rate[1] = DESC_RATEVHT1SS_MCS5;
		rate[2] = DESC_RATEVHT1SS_MCS6;
		rate[3] = DESC_RATEVHT1SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC44:
	case 0xE44:
	case 0x1844:
	case 0x1A44:
		/* register straddles the 1SS/2SS boundary */
		rate[0] = DESC_RATEVHT1SS_MCS8;
		rate[1] = DESC_RATEVHT1SS_MCS9;
		rate[2] = DESC_RATEVHT2SS_MCS0;
		rate[3] = DESC_RATEVHT2SS_MCS1;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC48:
	case 0xE48:
	case 0x1848:
	case 0x1A48:
		rate[0] = DESC_RATEVHT2SS_MCS2;
		rate[1] = DESC_RATEVHT2SS_MCS3;
		rate[2] = DESC_RATEVHT2SS_MCS4;
		rate[3] = DESC_RATEVHT2SS_MCS5;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC4C:
	case 0xE4C:
	case 0x184C:
	case 0x1A4C:
		rate[0] = DESC_RATEVHT2SS_MCS6;
		rate[1] = DESC_RATEVHT2SS_MCS7;
		rate[2] = DESC_RATEVHT2SS_MCS8;
		rate[3] = DESC_RATEVHT2SS_MCS9;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCD8:
	case 0xED8:
	case 0x18D8:
	case 0x1AD8:
		/* 3SS HT rates */
		rate[0] = DESC_RATEMCS16;
		rate[1] = DESC_RATEMCS17;
		rate[2] = DESC_RATEMCS18;
		rate[3] = DESC_RATEMCS19;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCDC:
	case 0xEDC:
	case 0x18DC:
	case 0x1ADC:
		rate[0] = DESC_RATEMCS20;
		rate[1] = DESC_RATEMCS21;
		rate[2] = DESC_RATEMCS22;
		rate[3] = DESC_RATEMCS23;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE0:
	case 0xEE0:
	case 0x18E0:
	case 0x1AE0:
		/* 3SS VHT rates */
		rate[0] = DESC_RATEVHT3SS_MCS0;
		rate[1] = DESC_RATEVHT3SS_MCS1;
		rate[2] = DESC_RATEVHT3SS_MCS2;
		rate[3] = DESC_RATEVHT3SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE4:
	case 0xEE4:
	case 0x18E4:
	case 0x1AE4:
		rate[0] = DESC_RATEVHT3SS_MCS4;
		rate[1] = DESC_RATEVHT3SS_MCS5;
		rate[2] = DESC_RATEVHT3SS_MCS6;
		rate[3] = DESC_RATEVHT3SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE8:
	case 0xEE8:
	case 0x18E8:
	case 0x1AE8:
		/* only two rates in this last register */
		rate[0] = DESC_RATEVHT3SS_MCS8;
		rate[1] = DESC_RATEVHT3SS_MCS9;
		for (i = 0; i < 2; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 2;
		break;
	default:
		/* unknown address: leave *rate_num at the caller's value */
		rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
		break;
	}
}
1452
1453 static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
1454 u32 band, u32 rfpath, u32 txnum,
1455 u32 regaddr, u32 bitmask, u32 data)
1456 {
1457 struct rtw_hal *hal = &rtwdev->hal;
1458 u8 rate_num = 0;
1459 u8 rate;
1460 u8 rates[RTW_RF_PATH_MAX] = {0};
1461 s8 offset;
1462 s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
1463 int i;
1464
1465 rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
1466 rates, pwr_by_rate, &rate_num);
1467
1468 if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
1469 (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
1470 rate_num > RTW_RF_PATH_MAX))
1471 return;
1472
1473 for (i = 0; i < rate_num; i++) {
1474 offset = pwr_by_rate[i];
1475 rate = rates[i];
1476 if (band == PHY_BAND_2G)
1477 hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
1478 else if (band == PHY_BAND_5G)
1479 hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
1480 else
1481 continue;
1482 }
1483 }
1484
1485 void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
1486 {
1487 const struct rtw_phy_pg_cfg_pair *p = tbl->data;
1488 const struct rtw_phy_pg_cfg_pair *end = p + tbl->size;
1489
1490 for (; p < end; p++) {
1491 if (p->addr == 0xfe || p->addr == 0xffe) {
1492 msleep(50);
1493 continue;
1494 }
1495 rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
1496 p->tx_num, p->addr, p->bitmask,
1497 p->data);
1498 }
1499 }
1500 EXPORT_SYMBOL(rtw_parse_tbl_bb_pg);
1501
/* 5 GHz channel number -> index map for the tx-power limit tables.
 * rtw_channel_to_idx() linearly scans this array; the position of a
 * channel here is its index into hal->tx_pwr_limit_5g[...][ch_idx].
 */
static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
	36, 38, 40, 42, 44, 46, 48,
	52, 54, 56, 58, 60, 62, 64,
	100, 102, 104, 106, 108, 110, 112,
	116, 118, 120, 122, 124, 126, 128,
	132, 134, 136, 138, 140, 142, 144,
	149, 151, 153, 155, 157, 159, 161,
	165, 167, 169, 171, 173, 175, 177};
1510
1511 static int rtw_channel_to_idx(u8 band, u8 channel)
1512 {
1513 int ch_idx;
1514 u8 n_channel;
1515
1516 if (band == PHY_BAND_2G) {
1517 ch_idx = channel - 1;
1518 n_channel = RTW_MAX_CHANNEL_NUM_2G;
1519 } else if (band == PHY_BAND_5G) {
1520 n_channel = RTW_MAX_CHANNEL_NUM_5G;
1521 for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
1522 if (rtw_channel_idx_5g[ch_idx] == channel)
1523 break;
1524 } else {
1525 return -1;
1526 }
1527
1528 if (ch_idx >= n_channel)
1529 return -1;
1530
1531 return ch_idx;
1532 }
1533
1534 static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
1535 u8 bw, u8 rs, u8 ch, s8 pwr_limit)
1536 {
1537 struct rtw_hal *hal = &rtwdev->hal;
1538 u8 max_power_index = rtwdev->chip->max_power_index;
1539 s8 ww;
1540 int ch_idx;
1541
1542 pwr_limit = clamp_t(s8, pwr_limit,
1543 -max_power_index, max_power_index);
1544 ch_idx = rtw_channel_to_idx(band, ch);
1545
1546 if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
1547 rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
1548 WARN(1,
1549 "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
1550 regd, band, bw, rs, ch_idx, pwr_limit);
1551 return;
1552 }
1553
1554 if (band == PHY_BAND_2G) {
1555 hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
1556 ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx];
1557 ww = min_t(s8, ww, pwr_limit);
1558 hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
1559 } else if (band == PHY_BAND_5G) {
1560 hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
1561 ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx];
1562 ww = min_t(s8, ww, pwr_limit);
1563 hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
1564 }
1565 }
1566
1567
1568 static void
1569 rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd,
1570 u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht)
1571 {
1572 struct rtw_hal *hal = &rtwdev->hal;
1573 u8 max_power_index = rtwdev->chip->max_power_index;
1574 s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx];
1575 s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx];
1576
1577 if (lmt_ht == lmt_vht)
1578 return;
1579
1580 if (lmt_ht == max_power_index)
1581 hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht;
1582
1583 else if (lmt_vht == max_power_index)
1584 hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht;
1585 }
1586
1587
1588 static void
1589 rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx)
1590 {
1591 u8 rs_idx, rs_ht, rs_vht;
1592 u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
1593 {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} };
1594
1595 for (rs_idx = 0; rs_idx < 2; rs_idx++) {
1596 rs_ht = rs_cmp[rs_idx][0];
1597 rs_vht = rs_cmp[rs_idx][1];
1598
1599 rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht);
1600 }
1601 }
1602
1603
1604 static void
1605 rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw)
1606 {
1607 u8 ch_idx;
1608
1609 for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++)
1610 rtw_xref_txpwr_lmt_by_rs(rtwdev, regd, bw, ch_idx);
1611 }
1612
1613
1614 static void
1615 rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd)
1616 {
1617 u8 bw;
1618
1619 for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++)
1620 rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw);
1621 }
1622
1623
1624 static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
1625 {
1626 u8 regd;
1627
1628 for (regd = 0; regd < RTW_REGD_MAX; regd++)
1629 rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
1630 }
1631
1632 static void
1633 __cfg_txpwr_lmt_by_alt(struct rtw_hal *hal, u8 regd, u8 regd_alt, u8 bw, u8 rs)
1634 {
1635 u8 ch;
1636
1637 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
1638 hal->tx_pwr_limit_2g[regd][bw][rs][ch] =
1639 hal->tx_pwr_limit_2g[regd_alt][bw][rs][ch];
1640
1641 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
1642 hal->tx_pwr_limit_5g[regd][bw][rs][ch] =
1643 hal->tx_pwr_limit_5g[regd_alt][bw][rs][ch];
1644 }
1645
1646 static void
1647 rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt)
1648 {
1649 u8 bw, rs;
1650
1651 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
1652 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
1653 __cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt,
1654 bw, rs);
1655 }
1656
1657 void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
1658 const struct rtw_table *tbl)
1659 {
1660 const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
1661 const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
1662 u32 regd_cfg_flag = 0;
1663 u8 regd_alt;
1664 u8 i;
1665
1666 for (; p < end; p++) {
1667 regd_cfg_flag |= BIT(p->regd);
1668 rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
1669 p->bw, p->rs, p->ch, p->txpwr_lmt);
1670 }
1671
1672 for (i = 0; i < RTW_REGD_MAX; i++) {
1673 if (i == RTW_REGD_WW)
1674 continue;
1675
1676 if (regd_cfg_flag & BIT(i))
1677 continue;
1678
1679 rtw_dbg(rtwdev, RTW_DBG_REGD,
1680 "txpwr regd %d does not be configured\n", i);
1681
1682 if (rtw_regd_has_alt(i, ®d_alt) &&
1683 regd_cfg_flag & BIT(regd_alt)) {
1684 rtw_dbg(rtwdev, RTW_DBG_REGD,
1685 "cfg txpwr regd %d by regd %d as alternative\n",
1686 i, regd_alt);
1687
1688 rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, regd_alt);
1689 continue;
1690 }
1691
1692 rtw_dbg(rtwdev, RTW_DBG_REGD, "cfg txpwr regd %d by WW\n", i);
1693 rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, RTW_REGD_WW);
1694 }
1695
1696 rtw_xref_txpwr_lmt(rtwdev);
1697 }
1698 EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt);
1699
/* do_cfg callback for MAC parameter tables: 8-bit register write. */
void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write8(rtwdev, addr, data);
}
EXPORT_SYMBOL(rtw_phy_cfg_mac);
1706
/* do_cfg callback for AGC tables: plain 32-bit register write. */
void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write32(rtwdev, addr, data);
}
EXPORT_SYMBOL(rtw_phy_cfg_agc);
1713
1714 void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
1715 u32 addr, u32 data)
1716 {
1717 if (addr == 0xfe)
1718 msleep(50);
1719 else if (addr == 0xfd)
1720 mdelay(5);
1721 else if (addr == 0xfc)
1722 mdelay(1);
1723 else if (addr == 0xfb)
1724 usleep_range(50, 60);
1725 else if (addr == 0xfa)
1726 udelay(5);
1727 else if (addr == 0xf9)
1728 udelay(1);
1729 else
1730 rtw_write32(rtwdev, addr, data);
1731 }
1732 EXPORT_SYMBOL(rtw_phy_cfg_bb);
1733
1734 void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
1735 u32 addr, u32 data)
1736 {
1737 if (addr == 0xffe) {
1738 msleep(50);
1739 } else if (addr == 0xfe) {
1740 usleep_range(100, 110);
1741 } else {
1742 rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
1743 udelay(1);
1744 }
1745 }
1746 EXPORT_SYMBOL(rtw_phy_cfg_rf);
1747
/* Load the RF calibration (RFK) init table, if the chip has one.
 * The register pokes below precede the table load; their exact meaning
 * is chip-internal (0x1e24 bit17, 0x1cd0 bits 28-31) — presumably DPK
 * power-on/clock enables, matching is_dpk_pwr_on set afterwards; confirm
 * against the vendor reference.
 */
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;

	if (!chip->rfk_init_tbl)
		return;

	rtw_write32_mask(rtwdev, 0x1e24, BIT(17), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(28), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(29), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(30), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(31), 0x0);

	rtw_load_table(rtwdev, chip->rfk_init_tbl);

	/* record that the DPK power state was enabled above */
	dpk_info->is_dpk_pwr_on = true;
}
1766
1767 void rtw_phy_load_tables(struct rtw_dev *rtwdev)
1768 {
1769 struct rtw_chip_info *chip = rtwdev->chip;
1770 u8 rf_path;
1771
1772 rtw_load_table(rtwdev, chip->mac_tbl);
1773 rtw_load_table(rtwdev, chip->bb_tbl);
1774 rtw_load_table(rtwdev, chip->agc_tbl);
1775 rtw_load_rfk_table(rtwdev);
1776
1777 for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
1778 const struct rtw_table *tbl;
1779
1780 tbl = chip->rf_tbl[rf_path];
1781 rtw_load_table(rtwdev, tbl);
1782 }
1783 }
1784 EXPORT_SYMBOL(rtw_phy_load_tables);
1785
/* Map a channel (2G and 5G share one number space) to its efuse
 * tx-power calibration group.  2G channels 1-14 and 5G channels 36-177
 * fold into groups 0-13; channel 14 belongs to the CCK-only group 5
 * for CCK rates but group 4 otherwise.  Unknown channels WARN and fall
 * into group 0.
 */
static u8 rtw_get_channel_group(u8 channel, u8 rate)
{
	switch (channel) {
	default:
		WARN_ON(1);
		fallthrough;
	case 1:
	case 2:
	case 36:
	case 38:
	case 40:
	case 42:
		return 0;
	case 3:
	case 4:
	case 5:
	case 44:
	case 46:
	case 48:
	case 50:
		return 1;
	case 6:
	case 7:
	case 8:
	case 52:
	case 54:
	case 56:
	case 58:
		return 2;
	case 9:
	case 10:
	case 11:
	case 60:
	case 62:
	case 64:
		return 3;
	case 12:
	case 13:
	case 100:
	case 102:
	case 104:
	case 106:
		return 4;
	case 14:
		/* channel 14 is CCK-only; non-CCK rates use group 4 */
		return rate <= DESC_RATE11M ? 5 : 4;
	case 108:
	case 110:
	case 112:
	case 114:
		return 5;
	case 116:
	case 118:
	case 120:
	case 122:
		return 6;
	case 124:
	case 126:
	case 128:
	case 130:
		return 7;
	case 132:
	case 134:
	case 136:
	case 138:
		return 8;
	case 140:
	case 142:
	case 144:
		return 9;
	case 149:
	case 151:
	case 153:
	case 155:
		return 10;
	case 157:
	case 159:
	case 161:
		return 11;
	case 165:
	case 167:
	case 169:
	case 171:
		return 12;
	case 173:
	case 175:
	case 177:
		return 13;
	}
}
1875
/* Return the tx-power offset applied when DPD (digital pre-distortion)
 * is disabled for @rate: -6 (scaled by txgi_factor) if the chip's
 * dpd_ratemask marks this rate, 0 otherwise.  Chips without the
 * en_dis_dpd capability always get 0.
 */
static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	s8 dpd_diff = 0;

	if (!chip->en_dis_dpd)
		return 0;

/* Token-pastes one switch case: DESC_RATE<x> is matched against the
 * DIS_DPD_RATE<x> bit in chip->dpd_ratemask.
 */
#define RTW_DPD_RATE_CHECK(_rate)					\
	case DESC_RATE ## _rate:					\
		if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask)		\
			dpd_diff = -6 * chip->txgi_factor;		\
		break

	switch (rate) {
	RTW_DPD_RATE_CHECK(6M);
	RTW_DPD_RATE_CHECK(9M);
	RTW_DPD_RATE_CHECK(MCS0);
	RTW_DPD_RATE_CHECK(MCS1);
	RTW_DPD_RATE_CHECK(MCS8);
	RTW_DPD_RATE_CHECK(MCS9);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS1);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS1);
	}
#undef RTW_DPD_RATE_CHECK

	return dpd_diff;
}
1906
1907 static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
1908 struct rtw_2g_txpwr_idx *pwr_idx_2g,
1909 enum rtw_bandwidth bandwidth,
1910 u8 rate, u8 group)
1911 {
1912 struct rtw_chip_info *chip = rtwdev->chip;
1913 u8 tx_power;
1914 bool mcs_rate;
1915 bool above_2ss;
1916 u8 factor = chip->txgi_factor;
1917
1918 if (rate <= DESC_RATE11M)
1919 tx_power = pwr_idx_2g->cck_base[group];
1920 else
1921 tx_power = pwr_idx_2g->bw40_base[group];
1922
1923 if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
1924 tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
1925
1926 mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
1927 (rate >= DESC_RATEVHT1SS_MCS0 &&
1928 rate <= DESC_RATEVHT2SS_MCS9);
1929 above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
1930 (rate >= DESC_RATEVHT2SS_MCS0);
1931
1932 if (!mcs_rate)
1933 return tx_power;
1934
1935 switch (bandwidth) {
1936 default:
1937 WARN_ON(1);
1938 fallthrough;
1939 case RTW_CHANNEL_WIDTH_20:
1940 tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
1941 if (above_2ss)
1942 tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
1943 break;
1944 case RTW_CHANNEL_WIDTH_40:
1945
1946 if (above_2ss)
1947 tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
1948 break;
1949 }
1950
1951 return tx_power;
1952 }
1953
1954 static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
1955 struct rtw_5g_txpwr_idx *pwr_idx_5g,
1956 enum rtw_bandwidth bandwidth,
1957 u8 rate, u8 group)
1958 {
1959 struct rtw_chip_info *chip = rtwdev->chip;
1960 u8 tx_power;
1961 u8 upper, lower;
1962 bool mcs_rate;
1963 bool above_2ss;
1964 u8 factor = chip->txgi_factor;
1965
1966 tx_power = pwr_idx_5g->bw40_base[group];
1967
1968 mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
1969 (rate >= DESC_RATEVHT1SS_MCS0 &&
1970 rate <= DESC_RATEVHT2SS_MCS9);
1971 above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
1972 (rate >= DESC_RATEVHT2SS_MCS0);
1973
1974 if (!mcs_rate) {
1975 tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
1976 return tx_power;
1977 }
1978
1979 switch (bandwidth) {
1980 default:
1981 WARN_ON(1);
1982 fallthrough;
1983 case RTW_CHANNEL_WIDTH_20:
1984 tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
1985 if (above_2ss)
1986 tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
1987 break;
1988 case RTW_CHANNEL_WIDTH_40:
1989
1990 if (above_2ss)
1991 tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
1992 break;
1993 case RTW_CHANNEL_WIDTH_80:
1994
1995 lower = pwr_idx_5g->bw40_base[group];
1996 upper = pwr_idx_5g->bw40_base[group + 1];
1997
1998 tx_power = (lower + upper) / 2;
1999 tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
2000 if (above_2ss)
2001 tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
2002 break;
2003 }
2004
2005 return tx_power;
2006 }
2007
2008
2009 static u8 rtw_phy_rate_to_rate_section(u8 rate)
2010 {
2011 if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
2012 return RTW_RATE_SECTION_CCK;
2013 else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
2014 return RTW_RATE_SECTION_OFDM;
2015 else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
2016 return RTW_RATE_SECTION_HT_1S;
2017 else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
2018 return RTW_RATE_SECTION_HT_2S;
2019 else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
2020 return RTW_RATE_SECTION_VHT_1S;
2021 else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
2022 return RTW_RATE_SECTION_VHT_2S;
2023 else
2024 return RTW_RATE_SECTION_MAX;
2025 }
2026
/* Look up the regulatory tx-power limit for one (band, bw, rate, ch).
 *
 * Walks every bandwidth from 20 MHz up to @bw using the hal's
 * center-channel-per-bandwidth table and takes the minimum limit —
 * a wide transmission must satisfy the limits of all the narrower
 * channels it covers.  Returns chip->max_power_index (i.e. "no limit")
 * for regulatory domains beyond WW, and WARNs on invalid arguments.
 */
static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
				     enum rtw_bandwidth bw, u8 rf_path,
				     u8 rate, u8 channel, u8 regd)
{
	struct rtw_hal *hal = &rtwdev->hal;
	u8 *cch_by_bw = hal->cch_by_bw;
	s8 power_limit = (s8)rtwdev->chip->max_power_index;
	u8 rs = rtw_phy_rate_to_rate_section(rate);
	int ch_idx;
	u8 cur_bw, cur_ch;
	s8 cur_lmt;

	/* domains above WW carry no limit table */
	if (regd > RTW_REGD_WW)
		return power_limit;

	if (rs == RTW_RATE_SECTION_MAX)
		goto err;

	/* CCK/OFDM limits are only tabulated at 20 MHz */
	if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM)
		bw = RTW_CHANNEL_WIDTH_20;

	/* HT rates exist only up to 40 MHz */
	if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
		bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);

	/* take the strictest limit over all covered bandwidths */
	for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) {
		cur_ch = cch_by_bw[cur_bw];

		ch_idx = rtw_channel_to_idx(band, cur_ch);
		if (ch_idx < 0)
			goto err;

		cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ?
			hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] :
			hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx];

		power_limit = min_t(s8, cur_lmt, power_limit);
	}

	return power_limit;

err:
	WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
	     band, bw, rf_path, rate, channel);
	return (s8)rtwdev->chip->max_power_index;
}
2075
2076 static s8 rtw_phy_get_tx_power_sar(struct rtw_dev *rtwdev, u8 sar_band,
2077 u8 rf_path, u8 rate)
2078 {
2079 u8 rs = rtw_phy_rate_to_rate_section(rate);
2080 struct rtw_sar_arg arg = {
2081 .sar_band = sar_band,
2082 .path = rf_path,
2083 .rs = rs,
2084 };
2085
2086 if (rs == RTW_RATE_SECTION_MAX)
2087 goto err;
2088
2089 return rtw_query_sar(rtwdev, &arg);
2090
2091 err:
2092 WARN(1, "invalid arguments, sar_band=%d, path=%d, rate=%d\n",
2093 sar_band, rf_path, rate);
2094 return (s8)rtwdev->chip->max_power_index;
2095 }
2096
2097 void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
2098 u8 ch, u8 regd, struct rtw_power_params *pwr_param)
2099 {
2100 struct rtw_hal *hal = &rtwdev->hal;
2101 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2102 struct rtw_txpwr_idx *pwr_idx;
2103 u8 group, band;
2104 u8 *base = &pwr_param->pwr_base;
2105 s8 *offset = &pwr_param->pwr_offset;
2106 s8 *limit = &pwr_param->pwr_limit;
2107 s8 *remnant = &pwr_param->pwr_remnant;
2108 s8 *sar = &pwr_param->pwr_sar;
2109
2110 pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
2111 group = rtw_get_channel_group(ch, rate);
2112
2113
2114 if (IS_CH_2G_BAND(ch)) {
2115 band = PHY_BAND_2G;
2116 *base = rtw_phy_get_2g_tx_power_index(rtwdev,
2117 &pwr_idx->pwr_idx_2g,
2118 bw, rate, group);
2119 *offset = hal->tx_pwr_by_rate_offset_2g[path][rate];
2120 } else {
2121 band = PHY_BAND_5G;
2122 *base = rtw_phy_get_5g_tx_power_index(rtwdev,
2123 &pwr_idx->pwr_idx_5g,
2124 bw, rate, group);
2125 *offset = hal->tx_pwr_by_rate_offset_5g[path][rate];
2126 }
2127
2128 *limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
2129 rate, ch, regd);
2130 *remnant = (rate <= DESC_RATE11M ? dm_info->txagc_remnant_cck :
2131 dm_info->txagc_remnant_ofdm);
2132 *sar = rtw_phy_get_tx_power_sar(rtwdev, hal->sar_band, path, rate);
2133 }
2134
2135 u8
2136 rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
2137 enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
2138 {
2139 struct rtw_power_params pwr_param = {0};
2140 u8 tx_power;
2141 s8 offset;
2142
2143 rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth,
2144 channel, regd, &pwr_param);
2145
2146 tx_power = pwr_param.pwr_base;
2147 offset = min3(pwr_param.pwr_offset,
2148 pwr_param.pwr_limit,
2149 pwr_param.pwr_sar);
2150
2151 if (rtwdev->chip->en_dis_dpd)
2152 offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate);
2153
2154 tx_power += offset + pwr_param.pwr_remnant;
2155
2156 if (tx_power > rtwdev->chip->max_power_index)
2157 tx_power = rtwdev->chip->max_power_index;
2158
2159 return tx_power;
2160 }
2161 EXPORT_SYMBOL(rtw_phy_get_tx_power_index);
2162
2163 static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
2164 u8 ch, u8 path, u8 rs)
2165 {
2166 struct rtw_hal *hal = &rtwdev->hal;
2167 u8 regd = rtw_regd_get(rtwdev);
2168 u8 *rates;
2169 u8 size;
2170 u8 rate;
2171 u8 pwr_idx;
2172 u8 bw;
2173 int i;
2174
2175 if (rs >= RTW_RATE_SECTION_MAX)
2176 return;
2177
2178 rates = rtw_rate_section[rs];
2179 size = rtw_rate_size[rs];
2180 bw = hal->current_band_width;
2181 for (i = 0; i < size; i++) {
2182 rate = rates[i];
2183 pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate,
2184 bw, ch, regd);
2185 hal->tx_pwr_tbl[path][rate] = pwr_idx;
2186 }
2187 }
2188
2189
2190
2191
2192
2193
2194 static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
2195 u8 ch, u8 path)
2196 {
2197 struct rtw_hal *hal = &rtwdev->hal;
2198 u8 rs;
2199
2200
2201 if (hal->current_band_type == RTW_BAND_2G)
2202 rs = RTW_RATE_SECTION_CCK;
2203 else
2204 rs = RTW_RATE_SECTION_OFDM;
2205
2206 for (; rs < RTW_RATE_SECTION_MAX; rs++)
2207 rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
2208 }
2209
/* Recompute the tx-power table for every RF path on @channel and push
 * it to hardware via the chip's set_tx_power_index op.  The hal
 * tx_power_mutex serializes table computation and the hardware write.
 */
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 path;

	mutex_lock(&hal->tx_power_mutex);

	for (path = 0; path < hal->rf_path_num; path++)
		rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path);

	chip->ops->set_tx_power_index(rtwdev);
	mutex_unlock(&hal->tx_power_mutex);
}
EXPORT_SYMBOL(rtw_phy_set_tx_power_level);
2225
/* Rebase one rate section's power-by-rate offsets for one path.
 *
 * The reference rate's raw value is saved as the section base, and
 * every rate's offset becomes relative to that base.  For VHT sections
 * the reference is rates[size - 3] — the third-from-last entry,
 * presumably MCS7 so that MCS8/9 (which some regulatories restrict)
 * don't define the base; confirm against the vendor algorithm.
 */
static void
rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
					u8 rs, u8 size, u8 *rates)
{
	u8 rate;
	u8 base_idx, rate_idx;
	s8 base_2g, base_5g;

	if (rs >= RTW_RATE_SECTION_VHT_1S)
		base_idx = rates[size - 3];
	else
		base_idx = rates[size - 1];
	base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
	base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
	hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
	hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
	/* turn absolute values into offsets relative to the section base */
	for (rate = 0; rate < size; rate++) {
		rate_idx = rates[rate];
		hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
		hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
	}
}
2248
2249 void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
2250 {
2251 u8 path;
2252
2253 for (path = 0; path < RTW_RF_PATH_MAX; path++) {
2254 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2255 RTW_RATE_SECTION_CCK,
2256 rtw_cck_size, rtw_cck_rates);
2257 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2258 RTW_RATE_SECTION_OFDM,
2259 rtw_ofdm_size, rtw_ofdm_rates);
2260 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2261 RTW_RATE_SECTION_HT_1S,
2262 rtw_ht_1s_size, rtw_ht_1s_rates);
2263 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2264 RTW_RATE_SECTION_HT_2S,
2265 rtw_ht_2s_size, rtw_ht_2s_rates);
2266 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2267 RTW_RATE_SECTION_VHT_1S,
2268 rtw_vht_1s_size, rtw_vht_1s_rates);
2269 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2270 RTW_RATE_SECTION_VHT_2S,
2271 rtw_vht_2s_size, rtw_vht_2s_rates);
2272 }
2273 }
2274
2275 static void
2276 __rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
2277 {
2278 s8 base;
2279 u8 ch;
2280
2281 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
2282 base = hal->tx_pwr_by_rate_base_2g[0][rs];
2283 hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
2284 }
2285
2286 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
2287 base = hal->tx_pwr_by_rate_base_5g[0][rs];
2288 hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
2289 }
2290 }
2291
/* Rebase all tx-power limits against the power-by-rate bases, for every
 * regulatory domain, channel width and rate section.  Also seeds the
 * 20 MHz center-channel slot with channel 1 as a safe default.
 */
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
	u8 regd, bw, rs;

	/* default 20 MHz center channel before any channel is set */
	hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
				__rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
}
2304
2305 static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev,
2306 u8 regd, u8 bw, u8 rs)
2307 {
2308 struct rtw_hal *hal = &rtwdev->hal;
2309 s8 max_power_index = (s8)rtwdev->chip->max_power_index;
2310 u8 ch;
2311
2312
2313 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
2314 hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index;
2315
2316
2317 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
2318 hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index;
2319 }
2320
2321 void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
2322 {
2323 struct rtw_hal *hal = &rtwdev->hal;
2324 u8 regd, path, rate, rs, bw;
2325
2326
2327 for (path = 0; path < RTW_RF_PATH_MAX; path++) {
2328 for (rate = 0; rate < DESC_RATE_MAX; rate++) {
2329 hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
2330 hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
2331 }
2332 }
2333
2334
2335 for (regd = 0; regd < RTW_REGD_MAX; regd++)
2336 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
2337 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
2338 rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
2339 rs);
2340 }
2341
2342 void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
2343 struct rtw_swing_table *swing_table)
2344 {
2345 const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
2346 u8 channel = rtwdev->hal.current_channel;
2347
2348 if (IS_CH_2G_BAND(channel)) {
2349 if (rtwdev->dm_info.tx_rate <= DESC_RATE11M) {
2350 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2g_ccka_p;
2351 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
2352 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
2353 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
2354 } else {
2355 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
2356 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
2357 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
2358 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
2359 }
2360 } else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
2361 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
2362 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
2363 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
2364 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
2365 } else if (IS_CH_5G_BAND_3(channel)) {
2366 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
2367 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
2368 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
2369 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
2370 } else if (IS_CH_5G_BAND_4(channel)) {
2371 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
2372 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
2373 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
2374 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
2375 } else {
2376 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
2377 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
2378 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
2379 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
2380 }
2381 }
2382 EXPORT_SYMBOL(rtw_phy_config_swing_table);
2383
2384 void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
2385 {
2386 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2387
2388 ewma_thermal_add(&dm_info->avg_thermal[path], thermal);
2389 dm_info->thermal_avg[path] =
2390 ewma_thermal_read(&dm_info->avg_thermal[path]);
2391 }
2392 EXPORT_SYMBOL(rtw_phy_pwrtrack_avg);
2393
2394 bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
2395 u8 path)
2396 {
2397 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2398 u8 avg = ewma_thermal_read(&dm_info->avg_thermal[path]);
2399
2400 if (avg == thermal)
2401 return false;
2402
2403 return true;
2404 }
2405 EXPORT_SYMBOL(rtw_phy_pwrtrack_thermal_changed);
2406
2407 u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path)
2408 {
2409 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2410 u8 therm_avg, therm_efuse, therm_delta;
2411
2412 therm_avg = dm_info->thermal_avg[path];
2413 therm_efuse = rtwdev->efuse.thermal_meter[path];
2414 therm_delta = abs(therm_avg - therm_efuse);
2415
2416 return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1);
2417 }
2418 EXPORT_SYMBOL(rtw_phy_pwrtrack_get_delta);
2419
2420 s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
2421 struct rtw_swing_table *swing_table,
2422 u8 tbl_path, u8 therm_path, u8 delta)
2423 {
2424 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2425 const u8 *delta_swing_table_idx_pos;
2426 const u8 *delta_swing_table_idx_neg;
2427
2428 if (delta >= RTW_PWR_TRK_TBL_SZ) {
2429 rtw_warn(rtwdev, "power track table overflow\n");
2430 return 0;
2431 }
2432
2433 if (!swing_table) {
2434 rtw_warn(rtwdev, "swing table not configured\n");
2435 return 0;
2436 }
2437
2438 delta_swing_table_idx_pos = swing_table->p[tbl_path];
2439 delta_swing_table_idx_neg = swing_table->n[tbl_path];
2440
2441 if (!delta_swing_table_idx_pos || !delta_swing_table_idx_neg) {
2442 rtw_warn(rtwdev, "invalid swing table index\n");
2443 return 0;
2444 }
2445
2446 if (dm_info->thermal_avg[therm_path] >
2447 rtwdev->efuse.thermal_meter[therm_path])
2448 return delta_swing_table_idx_pos[delta];
2449 else
2450 return -delta_swing_table_idx_neg[delta];
2451 }
2452 EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
2453
2454 bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
2455 {
2456 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2457 u8 delta_lck;
2458
2459 delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
2460 if (delta_lck >= rtwdev->chip->lck_threshold) {
2461 dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
2462 return true;
2463 }
2464 return false;
2465 }
2466 EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
2467
2468 bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
2469 {
2470 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2471 u8 delta_iqk;
2472
2473 delta_iqk = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_k);
2474 if (delta_iqk >= rtwdev->chip->iqk_threshold) {
2475 dm_info->thermal_meter_k = dm_info->thermal_avg[0];
2476 return true;
2477 }
2478 return false;
2479 }
2480 EXPORT_SYMBOL(rtw_phy_pwrtrack_need_iqk);
2481
2482 static void rtw_phy_set_tx_path_by_reg(struct rtw_dev *rtwdev,
2483 enum rtw_bb_path tx_path_sel_1ss)
2484 {
2485 struct rtw_path_div *path_div = &rtwdev->dm_path_div;
2486 enum rtw_bb_path tx_path_sel_cck = tx_path_sel_1ss;
2487 struct rtw_chip_info *chip = rtwdev->chip;
2488
2489 if (tx_path_sel_1ss == path_div->current_tx_path)
2490 return;
2491
2492 path_div->current_tx_path = tx_path_sel_1ss;
2493 rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "Switch TX path=%s\n",
2494 tx_path_sel_1ss == BB_PATH_A ? "A" : "B");
2495 chip->ops->config_tx_path(rtwdev, rtwdev->hal.antenna_tx,
2496 tx_path_sel_1ss, tx_path_sel_cck, false);
2497 }
2498
2499 static void rtw_phy_tx_path_div_select(struct rtw_dev *rtwdev)
2500 {
2501 struct rtw_path_div *path_div = &rtwdev->dm_path_div;
2502 enum rtw_bb_path path = path_div->current_tx_path;
2503 s32 rssi_a = 0, rssi_b = 0;
2504
2505 if (path_div->path_a_cnt)
2506 rssi_a = path_div->path_a_sum / path_div->path_a_cnt;
2507 else
2508 rssi_a = 0;
2509 if (path_div->path_b_cnt)
2510 rssi_b = path_div->path_b_sum / path_div->path_b_cnt;
2511 else
2512 rssi_b = 0;
2513
2514 if (rssi_a != rssi_b)
2515 path = (rssi_a > rssi_b) ? BB_PATH_A : BB_PATH_B;
2516
2517 path_div->path_a_cnt = 0;
2518 path_div->path_a_sum = 0;
2519 path_div->path_b_cnt = 0;
2520 path_div->path_b_sum = 0;
2521 rtw_phy_set_tx_path_by_reg(rtwdev, path);
2522 }
2523
2524 static void rtw_phy_tx_path_diversity_2ss(struct rtw_dev *rtwdev)
2525 {
2526 if (rtwdev->hal.antenna_rx != BB_PATH_AB) {
2527 rtw_dbg(rtwdev, RTW_DBG_PATH_DIV,
2528 "[Return] tx_Path_en=%d, rx_Path_en=%d\n",
2529 rtwdev->hal.antenna_tx, rtwdev->hal.antenna_rx);
2530 return;
2531 }
2532 if (rtwdev->sta_cnt == 0) {
2533 rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "No Link\n");
2534 return;
2535 }
2536
2537 rtw_phy_tx_path_div_select(rtwdev);
2538 }
2539
2540 void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev)
2541 {
2542 struct rtw_chip_info *chip = rtwdev->chip;
2543
2544 if (!chip->path_div_supported)
2545 return;
2546
2547 rtw_phy_tx_path_diversity_2ss(rtwdev);
2548 }