// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/dmi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-prph.h"
#include "fw/acpi.h"
#include "fw/pnvm.h"

#include "mvm.h"
#include "fw/dbg.h"
#include "iwl-phy-db.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"

#define MVM_UCODE_ALIVE_TIMEOUT	(HZ)
#define MVM_UCODE_CALIB_TIMEOUT	(2 * HZ)

#define IWL_TAS_US_MCC		0x5553
#define IWL_TAS_CANADA_MCC	0x4341

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

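/*
 * Configure RSS: hash TCP/UDP/payload flows for both IPv4 and IPv6
 * across all Rx queues except queue 0, which the driver keeps as its
 * fallback queue.  With e.g. num_rx_queues == 4, the indirection
 * table below cycles through queues 1, 2, 3, 1, 2, 3, ...
 */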
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}

void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
	__le32 *dump_data = mfu_dump_notif->data;
	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
	int i;

	if (mfu_dump_notif->index_num == 0)
		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
			 le32_to_cpu(mfu_dump_notif->assert_id));

	for (i = 0; i < n_words; i++)
		IWL_DEBUG_INFO(mvm,
			       "MFUART assert dump, dword %u: 0x%08x\n",
			       le16_to_cpu(mfu_dump_notif->index_num) *
			       n_words + i,
			       le32_to_cpu(dump_data[i]));
}

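/*
 * Wait-notification callback for UCODE_ALIVE_NTFY.  The layout of the
 * notification depends on its advertised version: v6 adds the IMR
 * (debug memory region) descriptor, v5 adds the SKU ID, and older
 * firmware is distinguished purely by payload size (v4 vs. v3, the
 * latter carrying a single LMAC).  Returns true once the alive data
 * has been parsed, so the waiter stops waiting.
 */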
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u16 status;
	u32 lmac_error_event_table, umac_error_table;
	u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	u32 i;

	if (version == 6) {
		struct iwl_alive_ntf_v6 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		mvm->trans->dbg.imr_data.imr_enable =
			le32_to_cpu(palive->imr.enabled);
		mvm->trans->dbg.imr_data.imr_size =
			le32_to_cpu(palive->imr.size);
		mvm->trans->dbg.imr_data.imr2sram_remainbyte =
			mvm->trans->dbg.imr_data.imr_size;
		mvm->trans->dbg.imr_data.imr_base_addr =
			palive->imr.base_addr;
		mvm->trans->dbg.imr_data.imr_curr_addr =
			le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
		IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n",
			     mvm->trans->dbg.imr_data.imr_enable,
			     mvm->trans->dbg.imr_data.imr_size,
			     le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));

		if (!mvm->trans->dbg.imr_data.imr_enable) {
			for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
				struct iwl_ucode_tlv *reg_tlv;
				struct iwl_fw_ini_region_tlv *reg;

				reg_tlv = mvm->trans->dbg.active_regions[i];
				if (!reg_tlv)
					continue;

				reg = (void *)reg_tlv->data;
				/*
				 * We have only one DRAM IMR region, so we
				 * can break as soon as we find the first
				 * one.
				 */
				if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
					mvm->trans->dbg.unsupported_region_msk |= BIT(i);
					break;
				}
			}
		}
	}

	if (version >= 5) {
		struct iwl_alive_ntf_v5 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);

		mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
		mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
		mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);

		IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
			     mvm->trans->sku_id[0],
			     mvm->trans->sku_id[1],
			     mvm->trans->sku_id[2]);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
		struct iwl_alive_ntf_v4 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);
	} else if (iwl_rx_packet_payload_len(pkt) ==
		   sizeof(struct iwl_alive_ntf_v3)) {
		struct iwl_alive_ntf_v3 *palive3;

		if (pkt_len < sizeof(*palive3))
			return false;

		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16_to_cpu(palive3->status);
	} else {
		WARN(1, "unsupported alive notification (size %d)\n",
		     iwl_rx_packet_payload_len(pkt));
		/* get timeout later */
		return false;
	}

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);

	if (lmac2)
		mvm->trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
				       ~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table) {
		if (umac_error_table >=
		    mvm->trans->cfg->min_umac_error_event_table) {
			iwl_fw_umac_set_alive_err_table(mvm->trans,
							umac_error_table);
		} else {
			IWL_ERR(mvm,
				"Not valid error log pointer 0x%08X for %s uCode\n",
				umac_error_table,
				(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
				"Init" : "RT");
		}
	}

	alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
	alive_data->valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mvm,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mvm,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);

	return true;
}

static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}

static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
{
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	struct iwl_trans *trans = mvm->trans;
	enum iwl_device_family device_family = trans->trans_cfg->device_family;

	if (device_family < IWL_DEVICE_FAMILY_8000)
		return;

	if (device_family <= IWL_DEVICE_FAMILY_9000)
		IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION);
	else
		IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
}

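/*
 * Load the requested firmware image and block until the ALIVE
 * notification arrives (or MVM_UCODE_ALIVE_TIMEOUT expires).  On
 * timeout, the secure-boot status registers and the UMAC/LMAC program
 * counters are dumped to help diagnose where the firmware stalled.
 */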
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data = {};
	const struct fw_img *fw;
	int ret;
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	bool run_in_rfkill =
		ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	/*
	 * We want to load the INIT firmware even in RFKILL
	 * For the unified firmware case, the ucode_type is not
	 * INIT, but we still need to run it.
	 */
	ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
	if (ret) {
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		/* SecBoot info */
		if (trans->trans_cfg->device_family >=
		    IWL_DEVICE_FAMILY_22000) {
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_umac_prph(trans,
						   UMAG_SB_CPU_2_STATUS));
		} else if (trans->trans_cfg->device_family >=
			   IWL_DEVICE_FAMILY_8000) {
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
				iwl_read_prph(trans, SB_CPU_2_STATUS));
		}

		iwl_mvm_print_pd_notification(mvm);

		/* LMAC/UMAC PC info */
		if (trans->trans_cfg->device_family >=
		    IWL_DEVICE_FAMILY_9000) {
			IWL_ERR(mvm, "UMAC PC: 0x%x\n",
				iwl_read_umac_prph(trans,
						   UREG_UMAC_CURRENT_PC));
			IWL_ERR(mvm, "LMAC PC: 0x%x\n",
				iwl_read_umac_prph(trans,
						   UREG_LMAC1_CURRENT_PC));
			if (iwl_mvm_is_cdb_supported(mvm))
				IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
					iwl_read_umac_prph(trans,
							   UREG_LMAC2_CURRENT_PC));
		}

		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);

		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return -EIO;
	}

	ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
	if (ret) {
		IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	/*
	 * Set a 'fake' TID for the command queue, since we use the
	 * hweight() of the tid_bitmap as a refcount now. Not that
	 * we ever even consider the command queue as one we might
	 * want to reuse, but be safe nevertheless.
	 */
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
#endif

	/*
	 * All the BSSes in the BSS table include the GP2 in the system
	 * at the beacon Rx time, this is of course no longer relevant
	 * since we are resetting the firmware.
	 * Purge all the BSS table.
	 */
	cfg80211_bss_flush(mvm->hw->wiphy);

	return 0;
}

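/*
 * Start a unified (single-image) firmware: load it, declare which init
 * sub-flows to run via INIT_EXTENDED_CFG_CMD, hand the NVM over (from
 * an external file if one was given), and wait for INIT_COMPLETE_NOTIF
 * before reading the NVM back from the firmware if needed.
 */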
static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	if (mvm->trans->cfg->tx_with_siso_diversity)
		init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));

	lockdep_assert_held(&mvm->mutex);

	mvm->rfkill_safe_init_done = false;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}
	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	/* Send init config command to mark that we are sending NVM access
	 * commands
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
						INIT_EXTENDED_CFG_CMD),
				   CMD_SEND_IN_RFKILL,
				   sizeof(init_cfg), &init_cfg);
	if (ret) {
		IWL_ERR(mvm, "Failed to run init config command: %d\n",
			ret);
		goto error;
	}

	/* Load NVM to NIC if needed */
	if (mvm->nvm_file_name) {
		ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
					    mvm->nvm_sections);
		if (ret)
			goto error;
		ret = iwl_mvm_load_nvm_to_nic(mvm);
		if (ret)
			goto error;
	}

	if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE),
				   CMD_SEND_IN_RFKILL,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
		mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
		if (IS_ERR(mvm->nvm_data)) {
			ret = PTR_ERR(mvm->nvm_data);
			mvm->nvm_data = NULL;
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			return ret;
		}
	}

	mvm->rfkill_safe_init_done = true;

	return 0;

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}

#ifdef CONFIG_ACPI
static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
				    struct iwl_phy_specific_cfg *phy_filters)
{
	/*
	 * TODO: read specific phy config from BIOS
	 * ACPI table for this feature has not been defined yet,
	 * so for now we use hardcoded values.
	 */

	if (IWL_MVM_PHY_FILTER_CHAIN_A) {
		phy_filters->filter_cfg_chain_a =
			cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A);
	}
	if (IWL_MVM_PHY_FILTER_CHAIN_B) {
		phy_filters->filter_cfg_chain_b =
			cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B);
	}
	if (IWL_MVM_PHY_FILTER_CHAIN_C) {
		phy_filters->filter_cfg_chain_c =
			cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C);
	}
	if (IWL_MVM_PHY_FILTER_CHAIN_D) {
		phy_filters->filter_cfg_chain_d =
			cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
	}
}
#else /* CONFIG_ACPI */

static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
				    struct iwl_phy_specific_cfg *phy_filters)
{
}
#endif /* CONFIG_ACPI */

#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
{
	u8 cmd_ver;
	int ret;
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
			      SAR_OFFSET_MAPPING_TABLE_CMD),
		.flags = 0,
		.data[0] = &mvm->fwrt.sgom_table,
		.len[0] = sizeof(mvm->fwrt.sgom_table),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	if (!mvm->fwrt.sgom_enabled) {
		IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n");
		return 0;
	}

	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
					IWL_FW_CMD_VER_UNKNOWN);

	if (cmd_ver != 2) {
		IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n",
				cmd_ver);
		return 0;
	}

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);

	return ret;
}
#else

static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
{
	return 0;
}
#endif

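/*
 * Send PHY_CONFIGURATION_CMD.  The payload layout depends on the
 * command version the firmware advertises: v3 carries the per-chain
 * PHY filter configuration in addition to the v1 fields, so the
 * command size is chosen accordingly.  Unified firmware only needs
 * this command when SISO antenna diversity is in use.
 */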
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	u32 cmd_id = PHY_CONFIGURATION_CMD;
	struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
	struct iwl_phy_specific_cfg phy_filters = {};
	u8 cmd_ver;
	size_t cmd_size;

	if (iwl_mvm_has_unified_ucode(mvm) &&
	    !mvm->trans->cfg->tx_with_siso_diversity)
		return 0;

	if (mvm->trans->cfg->tx_with_siso_diversity) {
		/*
		 * TODO: currently we don't set the antenna but letting the NIC
		 * to decide which antenna to use. This should come from BIOS.
		 */
		phy_cfg_cmd.phy_cfg =
			cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
	}

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));

	/* set extra PHY configuration flags from the device's cfg */
	phy_cfg_cmd.phy_cfg |=
		cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);

	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
					IWL_FW_CMD_VER_UNKNOWN);
	if (cmd_ver == 3) {
		iwl_mvm_phy_filter_init(mvm, &phy_filters);
		memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
		       sizeof(struct iwl_phy_specific_cfg));
	}

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);
	cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
				    sizeof(struct iwl_phy_cfg_cmd_v1);
	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
}

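/*
 * Run the INIT firmware image on pre-unified devices: load it, read
 * the NVM, send the antenna and PHY configuration, and wait for the
 * calibration results (collected into the PHY DB) or for RF-kill to
 * cut the flow short.
 */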
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm);

	lockdep_assert_held(&mvm->mutex);

	mvm->rfkill_safe_init_done = false;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto remove_notif;
	}

	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
		ret = iwl_mvm_send_bt_init_conf(mvm);
		if (ret)
			goto remove_notif;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!mvm->nvm_data) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto remove_notif;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name) {
		ret = iwl_mvm_load_nvm_to_nic(mvm);
		if (ret)
			goto remove_notif;
	}

	WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
		  "Too old NVM version (0x%0x, required = 0x%0x)",
		  mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		goto remove_notif;
	}

	mvm->rfkill_safe_init_done = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto remove_notif;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto remove_notif;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);
	if (!ret)
		goto out;

	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 0;
	} else {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
	}

	goto out;

remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->rfkill_safe_init_done = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)((u8 *)mvm->nvm_data->channels + 1);
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

#ifdef CONFIG_ACPI
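/*
 * Select the SAR (tx power) profiles to use for chains A/B.  The
 * REDUCE_TX_POWER_CMD payload grew over time, so the command length
 * and the location of the per-chain table are picked from the command
 * version the firmware advertises before the tables are filled in
 * from the ACPI data.
 */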
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	u32 cmd_id = REDUCE_TX_POWER_CMD;
	struct iwl_dev_tx_power_cmd cmd = {
		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	__le16 *per_chain;
	int ret;
	u16 len = 0;
	u32 n_subbands;
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
					   IWL_FW_CMD_VER_UNKNOWN);

	if (cmd_ver == 7) {
		len = sizeof(cmd.v7);
		n_subbands = IWL_NUM_SUB_BANDS_V2;
		per_chain = cmd.v7.per_chain[0][0];
		cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
	} else if (cmd_ver == 6) {
		len = sizeof(cmd.v6);
		n_subbands = IWL_NUM_SUB_BANDS_V2;
		per_chain = cmd.v6.per_chain[0][0];
	} else if (fw_has_api(&mvm->fw->ucode_capa,
			      IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
		len = sizeof(cmd.v5);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v5.per_chain[0][0];
	} else if (fw_has_capa(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
		len = sizeof(cmd.v4);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v4.per_chain[0][0];
	} else {
		len = sizeof(cmd.v3);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v3.per_chain[0][0];
	}

	/* all structs have the same common part, add it */
	len += sizeof(cmd.common);

	ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
				     IWL_NUM_CHAIN_TABLES,
				     n_subbands, prof_a, prof_b);

	/* return on error or if the profile is disabled (positive number) */
	if (ret)
		return ret;

	iwl_mei_set_power_limit(per_chain);

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}

int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
	struct iwl_geo_tx_power_profiles_resp *resp;
	u16 len;
	int ret;
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
		.flags = CMD_WANT_SKB,
		.data = { &geo_tx_cmd },
	};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
					   IWL_FW_CMD_VER_UNKNOWN);

	/* the ops field is at the same spot for all versions, so set in v1 */
	geo_tx_cmd.v1.ops =
		cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);

	if (cmd_ver == 5)
		len = sizeof(geo_tx_cmd.v5);
	else if (cmd_ver == 4)
		len = sizeof(geo_tx_cmd.v4);
	else if (cmd_ver == 3)
		len = sizeof(geo_tx_cmd.v3);
	else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
			    IWL_UCODE_TLV_API_SAR_TABLE_VER))
		len = sizeof(geo_tx_cmd.v2);
	else
		len = sizeof(geo_tx_cmd.v1);

	if (!iwl_sar_geo_support(&mvm->fwrt))
		return -EOPNOTSUPP;

	cmd.len[0] = len;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
		return ret;
	}

	resp = (void *)cmd.resp_pkt->data;
	ret = le32_to_cpu(resp->profile_idx);

	if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
		ret = -EIO;

	iwl_free_resp(&cmd);
	return ret;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
	union iwl_geo_tx_power_profiles_cmd cmd;
	u16 len;
	u32 n_bands;
	u32 n_profiles;
	u32 sk = 0;
	int ret;
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
					   IWL_FW_CMD_VER_UNKNOWN);

	BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));

	/* the ops field is at the same spot for all versions, so set in v1 */
	cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);

	if (cmd_ver == 5) {
		len = sizeof(cmd.v5);
		n_bands = ARRAY_SIZE(cmd.v5.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
	} else if (cmd_ver == 4) {
		len = sizeof(cmd.v4);
		n_bands = ARRAY_SIZE(cmd.v4.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
	} else if (cmd_ver == 3) {
		len = sizeof(cmd.v3);
		n_bands = ARRAY_SIZE(cmd.v3.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES;
	} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
			      IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
		len = sizeof(cmd.v2);
		n_bands = ARRAY_SIZE(cmd.v2.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES;
	} else {
		len = sizeof(cmd.v1);
		n_bands = ARRAY_SIZE(cmd.v1.table[0]);
		n_profiles = ACPI_NUM_GEO_PROFILES;
	}

	BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) ||
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));

	ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
			       n_bands, n_profiles);

	/*
	 * It is a valid scenario to not support SAR, or miss wgds table,
	 * but in that case there is no need to send the command.
	 */
	if (ret)
		return 0;

	/* Only set to South Korea if the table revision is 1 */
	if (mvm->fwrt.geo_rev == 1)
		sk = 1;

	/*
	 * Set the table_revision to South Korea (1) or not (0).  The
	 * element name is misleading, as it doesn't contain the table
	 * revision number, but whether the South Korea variation
	 * should be used.
	 * This must be done after calling iwl_sar_geo_init().
	 */
	if (cmd_ver == 5)
		cmd.v5.table_revision = cpu_to_le32(sk);
	else if (cmd_ver == 4)
		cmd.v4.table_revision = cpu_to_le32(sk);
	else if (cmd_ver == 3)
		cmd.v3.table_revision = cpu_to_le32(sk);
	else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
			    IWL_UCODE_TLV_API_SAR_TABLE_VER))
		cmd.v2.table_revision = cpu_to_le32(sk);

	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}

int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
	union iwl_ppag_table_cmd cmd;
	int ret, cmd_size;

	ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
	/* Not supporting PPAG table is a valid scenario */
	if (ret < 0)
		return 0;

	IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
						PER_PLATFORM_ANT_GAIN_CMD),
				   0, cmd_size, &cmd);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
			ret);

	return ret;
}

static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
	/* no need to read the table, done in INIT stage */
	if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
		return 0;

	return iwl_mvm_ppag_send_cmd(mvm);
}

static const struct dmi_system_id dmi_tas_approved_list[] = {
	{ .ident = "HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		},
	},
	{ .ident = "SAMSUNG",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
		},
	},
	{ .ident = "LENOVO",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},

	/* keep last */
	{}
};

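/*
 * Append @mcc to the TAS block list unless it is already present.
 * Returns false only when the list is full; for example, blocking the
 * US adds the IWL_TAS_US_MCC (0x5553, "US") entry and bumps *le_size
 * by one.
 */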
static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size,
					  unsigned int mcc)
{
	int i;
	u32 size = le32_to_cpu(*le_size);

	/* Verify that there is room for another country */
	if (size >= IWL_TAS_BLOCK_LIST_MAX)
		return false;

	for (i = 0; i < size; i++) {
		if (list[i] == cpu_to_le32(mcc))
			return true;
	}

	list[size++] = cpu_to_le32(mcc);
	*le_size = cpu_to_le32(size);
	return true;
}

static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
	u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
	int ret;
	union iwl_tas_config_cmd cmd = {};
	int cmd_size, fw_ver;

	BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
		     APCI_WTAS_BLACK_LIST_MAX);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
		IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
		return;
	}

	fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
				       IWL_FW_CMD_VER_UNKNOWN);

	ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"TAS table invalid or unavailable. (%d)\n",
				ret);
		return;
	}

	if (ret == 0)
		return;

	if (!dmi_check_system(dmi_tas_approved_list)) {
		IWL_DEBUG_RADIO(mvm,
				"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
				dmi_get_system_info(DMI_SYS_VENDOR));
		if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
						    &cmd.v4.block_list_size,
						    IWL_TAS_US_MCC)) ||
		    (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
						    &cmd.v4.block_list_size,
						    IWL_TAS_CANADA_MCC))) {
			IWL_DEBUG_RADIO(mvm,
					"Unable to add US/Canada to TAS block list, disabling TAS\n");
			return;
		}
	}

	/* v4 is the same size as v3, so no need to differentiate here */
	cmd_size = fw_ver < 3 ?
		sizeof(struct iwl_tas_config_cmd_v2) :
		sizeof(struct iwl_tas_config_cmd_v3);

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}

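/*
 * Evaluate the RFI (RF interference mitigation) enablement DSM method
 * from ACPI.  Anything other than an explicit "enable" answer,
 * including a missing or out-of-range value, falls back to disabled.
 */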
static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
	u8 value;
	int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
				      &iwl_rfi_guid, &value);

	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);

	} else if (value >= DSM_VALUE_RFI_MAX) {
		IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, value=%d\n",
				value);

	} else if (value == DSM_VALUE_RFI_ENABLE) {
		IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
		return DSM_VALUE_RFI_ENABLE;
	}

	IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");

	/* default behaviour is disabled */
	return DSM_VALUE_RFI_DISABLE;
}

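/*
 * Gather the LARI (regulatory) overrides from the ACPI DSM functions
 * and, if any bitmap ended up non-zero, send LARI_CONFIG_CHANGE sized
 * for the command version the firmware advertises (v1..v6).
 */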
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
	int ret;
	u32 value;
	struct iwl_lari_config_change_cmd_v6 cmd = {};

	cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
				   &iwl_guid, &value);
	if (!ret)
		cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_ENABLE_UNII4_CHAN,
				   &iwl_guid, &value);
	if (!ret)
		cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_ACTIVATE_CHANNEL,
				   &iwl_guid, &value);
	if (!ret)
		cmd.chan_state_active_bitmap = cpu_to_le32(value);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_ENABLE_6E,
				   &iwl_guid, &value);
	if (!ret)
		cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);

	ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
				   DSM_FUNC_FORCE_DISABLE_CHANNELS,
				   &iwl_guid, &value);
	if (!ret)
		cmd.force_disable_channels_bitmap = cpu_to_le32(value);

	if (cmd.config_bitmap ||
	    cmd.oem_uhb_allow_bitmap ||
	    cmd.oem_11ax_allow_bitmap ||
	    cmd.oem_unii4_allow_bitmap ||
	    cmd.chan_state_active_bitmap ||
	    cmd.force_disable_channels_bitmap) {
		size_t cmd_size;
		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						   WIDE_ID(REGULATORY_AND_NVM_GROUP,
							   LARI_CONFIG_CHANGE),
						   1);
		switch (cmd_ver) {
		case 6:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
			break;
		case 5:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
			break;
		case 4:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
			break;
		case 3:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
			break;
		case 2:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
			break;
		default:
			cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
			break;
		}

		IWL_DEBUG_RADIO(mvm,
				"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
				le32_to_cpu(cmd.config_bitmap),
				le32_to_cpu(cmd.oem_11ax_allow_bitmap));
		IWL_DEBUG_RADIO(mvm,
				"sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
				le32_to_cpu(cmd.oem_unii4_allow_bitmap),
				le32_to_cpu(cmd.chan_state_active_bitmap),
				cmd_ver);
		IWL_DEBUG_RADIO(mvm,
				"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
				le32_to_cpu(cmd.oem_uhb_allow_bitmap),
				le32_to_cpu(cmd.force_disable_channels_bitmap));
		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WIDE_ID(REGULATORY_AND_NVM_GROUP,
						   LARI_CONFIG_CHANGE),
					   0, cmd_size, &cmd);
		if (ret < 0)
			IWL_DEBUG_RADIO(mvm,
					"Failed to send LARI_CONFIG_CHANGE (%d)\n",
					ret);
	}
}

void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
{
	int ret;

	/* read PPAG table */
	ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"PPAG BIOS table invalid or unavailable. (%d)\n",
				ret);
	}

	/* read SAR tables */
	ret = iwl_sar_get_wrds_table(&mvm->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/*
		 * If not available, don't fail and don't bother with EWRD
		 * and WGDS.
		 */

		if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
			/*
			 * If basic SAR is not available, we check for WGDS,
			 * which should *not* be available either. If it is
			 * available, issue an error, because we can't use SAR
			 * Geo without basic SAR.
			 */
			IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
		}

	} else {
		ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
		/* if EWRD is not available, we can still use
		 * WRDS, so don't fail */
		if (ret < 0)
			IWL_DEBUG_RADIO(mvm,
					"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
					ret);

		/* read geo SAR table */
		if (iwl_sar_geo_support(&mvm->fwrt)) {
			ret = iwl_sar_get_wgds_table(&mvm->fwrt);
			if (ret < 0)
				IWL_DEBUG_RADIO(mvm,
						"Geo SAR BIOS table invalid or unavailable. (%d)\n",
						ret);
			/* we don't fail if the table is not available */
		}
	}
}
#else /* CONFIG_ACPI */

inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
				      int prof_a, int prof_b)
{
	return 1;
}

inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}

int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
	return 0;
}

static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
}

static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
}

static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
	return DSM_VALUE_RFI_DISABLE;
}

void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
{
}

#endif /* CONFIG_ACPI */

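/*
 * Send FW_ERROR_RECOVERY_CMD.  With ERROR_RECOVERY_UPDATE_DB, the
 * previously saved error-recovery blob is attached as a second,
 * no-copy fragment, and the firmware answers with a status word that
 * is non-zero if it rejected the blob.
 */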
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
	u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
	int ret;
	u32 resp;

	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
		.buf_size = 0,
	};
	struct iwl_host_cmd host_cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated while HW reset */
		if (!mvm->error_recovery_buf)
			return;

		host_cmd.data[1] = mvm->error_recovery_buf;
		host_cmd.len[1] = error_log_size;
		host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mvm_send_cmd(mvm, &host_cmd);
	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	/* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
		if (resp)
			IWL_ERR(mvm,
				"Failed to send recovery cmd blob was invalid %d\n",
				resp);
	}
}

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	return iwl_mvm_sar_select_profile(mvm, 1, 1);
}

static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm);

	ret = iwl_run_init_mvm_ucode(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);

		if (iwlmvm_mod_params.init_dbg)
			return 0;
		return ret;
	}

	iwl_fw_dbg_stop_sync(&mvm->fwrt);
	iwl_trans_stop_device(mvm->trans);
	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	mvm->rfkill_safe_init_done = false;
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	mvm->rfkill_safe_init_done = true;

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}

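/*
 * Bring the device fully up: start the hardware, load the runtime
 * firmware and then push down the whole configuration sequence
 * (antennas, PHY DB, BT coex, RSS, station table, PHY contexts,
 * thermal, LTR, regulatory and SAR tables) before mac80211 takes over.
 */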
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;
	struct ieee80211_supported_band *sband = NULL;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		if (ret != -ERFKILL)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	iwl_get_shared_mem_conf(&mvm->fwrt);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	if (!iwl_trans_dbg_ini_valid(mvm->trans)) {
		mvm->fwrt.dump.conf = FW_DBG_INVALID;
		/* if we have a destination, assume EARLY START */
		if (mvm->fw->dbg.dest_tlv)
			mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
		iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	if (!iwl_mvm_has_unified_ucode(mvm)) {
		/* Send phy db control command and then phy db calibration */
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;
	}

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
		ret = iwl_set_soc_latency(&mvm->fwrt);
		if (ret)
			goto error;
	}

	/* Init RSS configuration */
	ret = iwl_configure_rxq(&mvm->fwrt);
	if (ret)
		goto error;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	}

	/*
	 * Add auxiliary station for scanning.
	 * Newer versions of this command implies that the fw uses
	 * internal aux station for all aux activities that don't
	 * requires a dedicated data queue.
	 */
	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
		/*
		 * In old version the aux station uses mac id like other
		 * station and not lmac id
		 */
		ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
		if (ret)
			goto error;
	}

	/* Add all the PHY contexts */
	i = 0;
	while (!sband && i < NUM_NL80211_BANDS)
		sband = mvm->hw->wiphy->bands[i++];

	if (WARN_ON_ONCE(!sband)) {
		ret = -ENODEV;
		goto error;
	}

	chan = &sband->channels[0];

	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

#ifdef CONFIG_THERMAL
	/* TODO: read the budget from BIOS / Platform NVM */

	/*
	 * In case there is no budget from BIOS / Platform NVM the default
	 * budget should be 2000mW (cooling state 0).
	 */
	if (iwl_mvm_is_ctdp_supported(mvm)) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#endif

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
		WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	iwl_mvm_lari_cfg(mvm);
	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);

	if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
		IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");

	ret = iwl_mvm_ppag_init(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_sar_init(mvm);
	if (ret == 0)
		ret = iwl_mvm_sar_geo_init(mvm);
	if (ret < 0)
		goto error;

	ret = iwl_mvm_sgom_init(mvm);
	if (ret)
		goto error;

	iwl_mvm_tas_init(mvm);
	iwl_mvm_leds_sync(mvm);

	iwl_mvm_ftm_initiator_smooth_config(mvm);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) {
		if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
			iwl_rfi_send_config_cmd(mvm, NULL);
	}

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
error:
	if (!iwlmvm_mod_params.init_dbg || !ret)
		iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
		/*
		 * Add auxiliary station for scanning.
		 * Newer versions of this command implies that the fw uses
		 * internal aux station for all aux activities that don't
		 * requires a dedicated data queue.
		 * In old version the aux station uses mac id like other
		 * station and not lmac id
		 */
		ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
		if (ret)
			goto error;
	}

	return 0;
error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}