0006 #include <linux/etherdevice.h>
0007 #include <linux/netdevice.h>
0008 #include <linux/ieee80211.h>
0009 #include <linux/rtnetlink.h>
0010 #include <linux/module.h>
0011 #include <linux/moduleparam.h>
0012 #include <linux/mei_cl_bus.h>
0013 #include <linux/rcupdate.h>
0014 #include <linux/debugfs.h>
0015 #include <linux/skbuff.h>
0016 #include <linux/wait.h>
0017 #include <linux/slab.h>
0018 #include <linux/mm.h>
0019
0020 #include <net/cfg80211.h>
0021
0022 #include "internal.h"
0023 #include "iwl-mei.h"
0024 #include "trace.h"
0025 #include "trace-data.h"
0026 #include "sap.h"
0027
0028 MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
0029 MODULE_LICENSE("GPL");
0030
0031 #define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
0032 0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)
0033
0041
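/*
 * Module-wide state: the single MEI client device that the CSME / SAP
 * connection runs on, the mutex that serializes all SAP state, and the
 * status bitmap tested by iwl_mei_is_connected().
 */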
0042 static struct mei_cl_device *iwl_mei_global_cldev;
0043 static DEFINE_MUTEX(iwl_mei_mutex);
0044 static unsigned long iwl_mei_status;
0045
0046 enum iwl_mei_status_bits {
0047 IWL_MEI_STATUS_SAP_CONNECTED,
0048 };
0049
0050 bool iwl_mei_is_connected(void)
0051 {
0052 return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
0053 }
0054 EXPORT_SYMBOL_GPL(iwl_mei_is_connected);
0055
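/*
 * Version of the SAP protocol spoken by this driver, and the magic that
 * identifies the shared-memory control block (0x21504153 reads as "SAP!"
 * when stored little-endian).
 */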
0056 #define SAP_VERSION 3
0057 #define SAP_CONTROL_BLOCK_ID 0x21504153
0058
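/**
 * struct iwl_sap_q_ctrl_blk - control block of one SAP queue (ring buffer)
 * @wr_ptr: write pointer, as a byte offset into the queue
 * @rd_ptr: read pointer, as a byte offset into the queue
 * @size: size of the queue in bytes
 */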
0059 struct iwl_sap_q_ctrl_blk {
0060 __le32 wr_ptr;
0061 __le32 rd_ptr;
0062 __le32 size;
0063 };
0064
0065 enum iwl_sap_q_idx {
0066 SAP_QUEUE_IDX_NOTIF = 0,
0067 SAP_QUEUE_IDX_DATA,
0068 SAP_QUEUE_IDX_MAX,
0069 };
0070
0071 struct iwl_sap_dir {
0072 __le32 reserved;
0073 struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
0074 };
0075
0076 enum iwl_sap_dir_idx {
0077 SAP_DIRECTION_HOST_TO_ME = 0,
0078 SAP_DIRECTION_ME_TO_HOST,
0079 SAP_DIRECTION_MAX,
0080 };
0081
0082 struct iwl_sap_shared_mem_ctrl_blk {
0083 __le32 sap_id;
0084 __le32 size;
0085 struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
0086 };
0087
0105
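/*
 * Sizes (in bytes) of the four SAP queues that live in the shared memory:
 * a data queue and a notification queue in each direction. The shared area
 * is laid out as the control block, then the four queues, then a trailing
 * copy of the control-block magic (presumably a sanity marker, see the
 * "+ 4" below), all rounded up to a page.
 */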
0106 #define SAP_H2M_DATA_Q_SZ 48256
0107 #define SAP_M2H_DATA_Q_SZ 24128
0108 #define SAP_H2M_NOTIF_Q_SZ 2240
0109 #define SAP_M2H_NOTIF_Q_SZ 62720
0110
0111 #define _IWL_MEI_SAP_SHARED_MEM_SZ \
0112 (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
0113 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
0114 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
0115
0116 #define IWL_MEI_SAP_SHARED_MEM_SZ \
0117 (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
0118
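/**
 * struct iwl_mei_shared_mem_ptrs - pointers into the shared memory
 * @ctrl: the control block, at the start of the shared area
 * @q_head: per-direction, per-queue pointer to the head of each queue
 * @q_size: per-direction, per-queue size of each queue in bytes
 */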
0119 struct iwl_mei_shared_mem_ptrs {
0120 struct iwl_sap_shared_mem_ctrl_blk *ctrl;
0121 void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
0122 size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
0123 };
0124
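/**
 * struct iwl_mei_filters - RCU-protected copy of the OOB filters from CSME
 * @rcu_head: rcu head, used to free the previous filters after an update
 * @filters: the filters themselves, as sent by CSME
 */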
0125 struct iwl_mei_filters {
0126 struct rcu_head rcu_head;
0127 struct iwl_sap_oob_filters filters;
0128 };
0129
0156
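/**
 * struct iwl_mei - per-device private data of this driver
 * @get_nvm_wq: wait queue for iwl_mei_get_nvm(), woken when the NVM arrives
 * @send_csa_msg_wk: worker that sends SAP_ME_MSG_CHECK_SHARED_AREA
 * @get_ownership_wq: wait queue for iwl_mei_get_ownership()
 * @shared_mem: pointers into the DMA-mapped shared area
 * @cldev: the MEI client device
 * @nvm: the last NVM data received from CSME, NULL until then
 * @filters: RCU-protected OOB filters received from CSME
 * @got_ownership: true when the host currently owns the NIC
 * @amt_enabled: true when CSME reported that AMT is enabled
 * @csa_throttled: true while CHECK_SHARED_AREA messages are being throttled
 * @csme_taking_ownership: true after CSME announced it is taking the NIC
 * @link_prot_state: last link protection state reported by CSME
 * @csa_throttle_end_wk: delayed worker that ends the CHECK_SHARED_AREA throttle
 * @data_q_lock: protects the host-to-CSME data queue against the remove flow
 * @sap_seq_no: sequence number for SAP messages written to the shared memory
 * @seq_no: sequence number for ME messages sent over the MEI channel
 * @dbgfs_dir: debugfs directory of this driver
 */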
0157 struct iwl_mei {
0158 wait_queue_head_t get_nvm_wq;
0159 struct work_struct send_csa_msg_wk;
0160 wait_queue_head_t get_ownership_wq;
0161 struct iwl_mei_shared_mem_ptrs shared_mem;
0162 struct mei_cl_device *cldev;
0163 struct iwl_mei_nvm *nvm;
0164 struct iwl_mei_filters __rcu *filters;
0165 bool got_ownership;
0166 bool amt_enabled;
0167 bool csa_throttled;
0168 bool csme_taking_ownership;
0169 bool link_prot_state;
0170 struct delayed_work csa_throttle_end_wk;
0171 spinlock_t data_q_lock;
0172
0173 atomic_t sap_seq_no;
0174 atomic_t seq_no;
0175
0176 struct dentry *dbgfs_dir;
0177 };
0178
0195
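/**
 * struct iwl_mei_cache - cache of the data received from the wifi driver
 * @ops: the wifi driver's callbacks, NULL until iwl_mei_register()
 * @netdev: the netdev used for the SAP data path, RCU protected
 * @conn_info: last connection info reported by the wifi driver
 * @power_limit: last SAR table reported by the wifi driver
 * @rf_kill: last rfkill state reported by the wifi driver
 * @mcc: last country code reported by the wifi driver
 * @mac_address: last MAC address reported by the wifi driver
 * @nvm_address: last NVM MAC address reported by the wifi driver
 * @priv: the wifi driver's opaque pointer, passed back in the callbacks
 *
 * This cache allows the data to be replayed to CSME later, see
 * iwl_mei_set_init_conf(), even if it was received while SAP was down.
 */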
0196 struct iwl_mei_cache {
0197 const struct iwl_mei_ops *ops;
0198 struct net_device __rcu *netdev;
0199 const struct iwl_sap_notif_connection_info *conn_info;
0200 const __le16 *power_limit;
0201 u32 rf_kill;
0202 u16 mcc;
	u8 mac_address[ETH_ALEN];
	u8 nvm_address[ETH_ALEN];
0205 void *priv;
0206 };
0207
0208 static struct iwl_mei_cache iwl_mei_cache = {
0209 .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
0210 };
0211
0212 static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
0213 {
0214 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0215
0216 if (mei_cldev_dma_unmap(cldev))
0217 dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
0218 memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
0219 }
0220
0221 #define HBM_DMA_BUF_ID_WLAN 1
0222
0223 static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
0224 {
0225 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0226 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
0227
0228 mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
0229 IWL_MEI_SAP_SHARED_MEM_SZ);
0230
0231 if (IS_ERR(mem->ctrl)) {
0232 int ret = PTR_ERR(mem->ctrl);
0233
0234 mem->ctrl = NULL;
0235
0236 return ret;
0237 }
0238
0239 memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);
0240
0241 return 0;
0242 }
0243
0244 static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
0245 {
0246 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
0247 struct iwl_sap_dir *h2m;
0248 struct iwl_sap_dir *m2h;
0249 int dir, queue;
0250 u8 *q_head;
0251
0252 mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
0253
0254 mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));
0255
0256 h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
0257 m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
0258
0259 h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
0260 cpu_to_le32(SAP_H2M_DATA_Q_SZ);
0261 h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
0262 cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
0263 m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
0264 cpu_to_le32(SAP_M2H_DATA_Q_SZ);
0265 m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
0266 cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);
0267
0268
0269 q_head = (void *)(mem->ctrl + 1);
0270
0271
0272 for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
0273 for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
0274 mem->q_head[dir][queue] = q_head;
0275 q_head +=
0276 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
0277 mem->q_size[dir][queue] =
0278 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
0279 }
0280 }
0281
0282 *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
0283 }
0284
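/*
 * Write one SAP message (header + payload) into a host-to-CSME ring buffer.
 * The free room is computed from the read/write offsets, the copy is split
 * in two when it wraps past the end of the queue, and the write pointer is
 * advanced modulo the queue size. Illustrative example (not real sizes):
 * with q_sz = 16, rd = 4 and wr = 10, the room is 16 - 10 + 4 = 10 bytes,
 * and an 8-byte message is written as 6 bytes at offset 10 plus 2 bytes at
 * offset 0, leaving wr = 2.
 */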
0285 static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
0286 struct iwl_sap_q_ctrl_blk *notif_q,
0287 u8 *q_head,
0288 const struct iwl_sap_hdr *hdr,
0289 u32 q_sz)
0290 {
0291 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
0292 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
0293 size_t room_in_buf;
0294 size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);
0295
0296 if (rd > q_sz || wr > q_sz) {
0297 dev_err(&cldev->dev,
0298 "Pointers are past the end of the buffer\n");
0299 return -EINVAL;
0300 }
0301
0302 room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
0303
0304
0305 if (room_in_buf < tx_sz) {
0306 dev_err(&cldev->dev,
0307 "Not enough room in the buffer\n");
0308 return -ENOSPC;
0309 }
0310
0311 if (wr + tx_sz <= q_sz) {
0312 memcpy(q_head + wr, hdr, tx_sz);
0313 } else {
0314 memcpy(q_head + wr, hdr, q_sz - wr);
0315 memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
0316 }
0317
0318 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
0319 return 0;
0320 }
0321
0322 static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
0323 {
0324 struct iwl_sap_q_ctrl_blk *notif_q;
0325 struct iwl_sap_dir *dir;
0326
0327 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
0328 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
0329
0330 if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
0331 return true;
0332
0333 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
0334 return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
0335 }
0336
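/*
 * Ring the CSME doorbell: tell it to look at the shared area because the
 * host wrote something there. To avoid flooding CSME, only one such message
 * is sent per 100ms; iwl_mei_csa_throttle_end_wk() sends another one if
 * data is still pending when the throttle window ends.
 */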
0337 static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
0338 {
0339 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0340 struct iwl_sap_me_msg_start msg = {
0341 .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
0342 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
0343 };
0344 int ret;
0345
0346 lockdep_assert_held(&iwl_mei_mutex);
0347
0348 if (mei->csa_throttled)
0349 return 0;
0350
0351 trace_iwlmei_me_msg(&msg.hdr, true);
0352 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
0353 if (ret != sizeof(msg)) {
0354 dev_err(&cldev->dev,
0355 "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
0356 ret);
0357 return ret;
0358 }
0359
0360 mei->csa_throttled = true;
0361
0362 schedule_delayed_work(&mei->csa_throttle_end_wk,
0363 msecs_to_jiffies(100));
0364
0365 return 0;
0366 }
0367
0368 static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
0369 {
0370 struct iwl_mei *mei =
0371 container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);
0372
0373 mutex_lock(&iwl_mei_mutex);
0374
0375 mei->csa_throttled = false;
0376
0377 if (iwl_mei_host_to_me_data_pending(mei))
0378 iwl_mei_send_check_shared_area(mei->cldev);
0379
0380 mutex_unlock(&iwl_mei_mutex);
0381 }
0382
0383 static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
0384 struct iwl_sap_hdr *hdr)
0385 {
0386 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0387 struct iwl_sap_q_ctrl_blk *notif_q;
0388 struct iwl_sap_dir *dir;
0389 void *q_head;
0390 u32 q_sz;
0391 int ret;
0392
0393 lockdep_assert_held(&iwl_mei_mutex);
0394
0395 if (!mei->shared_mem.ctrl) {
0396 dev_err(&cldev->dev,
0397 "No shared memory, can't send any SAP message\n");
0398 return -EINVAL;
0399 }
0400
0401 if (!iwl_mei_is_connected()) {
0402 dev_err(&cldev->dev,
0403 "Can't send a SAP message if we're not connected\n");
0404 return -ENODEV;
0405 }
0406
0407 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
0408 dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);
0409
0410 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
0411 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
0412 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
0413 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr, q_sz);
0415
0416 if (ret < 0)
0417 return ret;
0418
0419 trace_iwlmei_sap_cmd(hdr, true);
0420
0421 return iwl_mei_send_check_shared_area(cldev);
0422 }
0423
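/*
 * Copy an skb into the host-to-CSME data queue, prepending either a plain
 * SAP data header or, when cb_tx is set (used for the DHCP filter, see
 * CB_TX_DHCP_FILT_IDX below), an iwl_sap_cb_data header.
 */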
0424 void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
0425 {
0426 struct iwl_sap_q_ctrl_blk *notif_q;
0427 struct iwl_sap_dir *dir;
0428 struct iwl_mei *mei;
0429 size_t room_in_buf;
0430 size_t tx_sz;
0431 size_t hdr_sz;
0432 u32 q_sz;
0433 u32 rd;
0434 u32 wr;
0435 u8 *q_head;
0436
0437 if (!iwl_mei_global_cldev)
0438 return;
0439
0440 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
0441
0442
0443
0444
0445
0446
0447
0448
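	/*
	 * data_q_lock serializes this writer against iwl_mei_remove(), which
	 * clears IWL_MEI_STATUS_SAP_CONNECTED under the same lock, so the
	 * connection check below can't race with the shared memory going away.
	 */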
0449 spin_lock_bh(&mei->data_q_lock);
0450
0451 if (!iwl_mei_is_connected()) {
0452 spin_unlock_bh(&mei->data_q_lock);
0453 return;
0454 }
0455
0456
0457
0458
0459
0460
0461 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
0462 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
0463 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
0464 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
0465
0466 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
0467 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
0468 hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
0469 sizeof(struct iwl_sap_hdr);
0470 tx_sz = skb->len + hdr_sz;
0471
0472 if (rd > q_sz || wr > q_sz) {
0473 dev_err(&mei->cldev->dev,
0474 "can't write the data: pointers are past the end of the buffer\n");
0475 goto out;
0476 }
0477
0478 room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
0479
0480
0481 if (room_in_buf < tx_sz) {
0482 dev_err(&mei->cldev->dev,
0483 "Not enough room in the buffer for this data\n");
0484 goto out;
0485 }
0486
0487 if (skb_headroom(skb) < hdr_sz) {
0488 dev_err(&mei->cldev->dev,
0489 "Not enough headroom in the skb to write the SAP header\n");
0490 goto out;
0491 }
0492
0493 if (cb_tx) {
0494 struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));
0495
0496 memset(cb_hdr, 0, sizeof(*cb_hdr));
0497 cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
0498 cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
0499 cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
0500 cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
0501 cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
0502 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
0503 } else {
0504 struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));
0505
0506 hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
0507 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
0508 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
0509 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
0510 }
0511
0512 if (wr + tx_sz <= q_sz) {
0513 skb_copy_bits(skb, 0, q_head + wr, tx_sz);
0514 } else {
0515 skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
0516 skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
0517 }
0518
0519 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
0520
0521 out:
0522 spin_unlock_bh(&mei->data_q_lock);
0523 }
0524
0525 static int
0526 iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
0527 {
0528 struct iwl_sap_hdr msg = {
0529 .type = cpu_to_le16(type),
0530 };
0531
0532 return iwl_mei_send_sap_msg_payload(cldev, &msg);
0533 }
0534
0535 static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
0536 {
0537 struct iwl_mei *mei =
0538 container_of(wk, struct iwl_mei, send_csa_msg_wk);
0539
0540 if (!iwl_mei_is_connected())
0541 return;
0542
0543 mutex_lock(&iwl_mei_mutex);
0544
0545 iwl_mei_send_check_shared_area(mei->cldev);
0546
0547 mutex_unlock(&iwl_mei_mutex);
0548 }
0549
0550
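/*
 * rx_handler attached to the netdev while AMT is enabled. Packets that the
 * OOB filters (iwl_mei_rx_filter()) mark as also or only meant for CSME
 * cause a CHECK_SHARED_AREA message to be scheduled; packets not meant for
 * the host at all are dropped here.
 */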
0551 static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
0552 {
0553 struct sk_buff *skb = *pskb;
0554 struct iwl_mei *mei =
0555 rcu_dereference(skb->dev->rx_handler_data);
0556 struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
0557 bool rx_for_csme = false;
0558 rx_handler_result_t res;
0559
0560
0561
0562
0563
0564 if (!iwl_mei_is_connected()) {
0565 dev_err(&mei->cldev->dev,
0566 "Got an Rx packet, but we're not connected to SAP?\n");
0567 return RX_HANDLER_PASS;
0568 }
0569
0570 if (filters)
0571 res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
0572 else
0573 res = RX_HANDLER_PASS;
0574
0575
0576
0577
0578
0579
0580 if (rx_for_csme)
0581 schedule_work(&mei->send_csa_msg_wk);
0582
0583 if (res != RX_HANDLER_PASS) {
0584 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
0585 dev_kfree_skb(skb);
0586 }
0587
0588 return res;
0589 }
0590
0591 static void
0592 iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
0593 const struct iwl_sap_me_msg_start_ok *rsp,
0594 ssize_t len)
0595 {
0596 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0597
0598 if (len != sizeof(*rsp)) {
0599 dev_err(&cldev->dev,
0600 "got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
0601 dev_err(&cldev->dev,
0602 "size is incorrect: %zd instead of %zu\n",
0603 len, sizeof(*rsp));
0604 return;
0605 }
0606
0607 if (rsp->supported_version != SAP_VERSION) {
0608 dev_err(&cldev->dev,
0609 "didn't get the expected version: got %d\n",
0610 rsp->supported_version);
0611 return;
0612 }
0613
0614 mutex_lock(&iwl_mei_mutex);
0615 set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
0616
0617 if (iwl_mei_cache.ops) {
0618 iwl_mei_send_sap_msg(mei->cldev,
0619 SAP_MSG_NOTIF_WIFIDR_UP);
0620 iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
0621 }
0622
0623 mutex_unlock(&iwl_mei_mutex);
0624 }
0625
0626 static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
0627 const struct iwl_sap_csme_filters *filters)
0628 {
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0630 struct iwl_mei_filters *new_filters;
0631 struct iwl_mei_filters *old_filters;
0632
0633 old_filters =
0634 rcu_dereference_protected(mei->filters,
0635 lockdep_is_held(&iwl_mei_mutex));
0636
0637 new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
0638 if (!new_filters)
0639 return;
0640
0641
0642 new_filters->filters = filters->filters;
0643
0644 rcu_assign_pointer(mei->filters, new_filters);
0645
0646 if (old_filters)
0647 kfree_rcu(old_filters, rcu_head);
0648 }
0649
0650 static void
0651 iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
0652 const struct iwl_sap_notif_conn_status *status)
0653 {
0654 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0655 struct iwl_mei_conn_info conn_info = {
0656 .lp_state = le32_to_cpu(status->link_prot_state),
0657 .ssid_len = le32_to_cpu(status->conn_info.ssid_len),
0658 .channel = status->conn_info.channel,
0659 .band = status->conn_info.band,
0660 .auth_mode = le32_to_cpu(status->conn_info.auth_mode),
0661 .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
0662 };
0663
0664 if (!iwl_mei_cache.ops ||
0665 conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
0666 return;
0667
0668 memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
0669 ether_addr_copy(conn_info.bssid, status->conn_info.bssid);
0670
0671 iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
0672
	mei->link_prot_state = !!le32_to_cpu(status->link_prot_state);
0674
0675
0676
0677
0678
0679
0680
0681
0682 if (mei->got_ownership)
0683 iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
0684 status->link_prot_state);
0685 else
0686 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
0687 status->link_prot_state);
0688 }
0689
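/*
 * Replay the state cached in iwl_mei_cache to CSME: who owns the NIC, the
 * current link, the country code, the SAR table, the NIC addresses and the
 * rfkill state. Called when AMT gets enabled so that CSME starts with an
 * up-to-date view of the host state.
 */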
0690 static void iwl_mei_set_init_conf(struct iwl_mei *mei)
0691 {
0692 struct iwl_sap_notif_host_link_up link_msg = {
0693 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
0694 .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
0695 };
0696 struct iwl_sap_notif_country_code mcc_msg = {
0697 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
0698 .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
0699 .mcc = cpu_to_le16(iwl_mei_cache.mcc),
0700 };
0701 struct iwl_sap_notif_sar_limits sar_msg = {
0702 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
0703 .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
0704 };
0705 struct iwl_sap_notif_host_nic_info nic_info_msg = {
0706 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
0707 .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
0708 };
0709 struct iwl_sap_msg_dw rfkill_msg = {
0710 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
0711 .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
0712 .val = cpu_to_le32(iwl_mei_cache.rf_kill),
0713 };
0714
0715 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
0716
0717 if (iwl_mei_cache.conn_info) {
0718 link_msg.conn_info = *iwl_mei_cache.conn_info;
0719 iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
0720 }
0721
0722 iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);
0723
0724 if (iwl_mei_cache.power_limit) {
0725 memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
0726 sizeof(sar_msg.sar_chain_info_table));
0727 iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
0728 }
0729
0730 ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address);
0731 ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address);
0732 iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);
0733
0734 iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
0735 }
0736
0737 static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
0738 const struct iwl_sap_msg_dw *dw)
0739 {
0740 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0741 struct net_device *netdev;
0742
0743
0744
0745
0746
0747 rtnl_lock();
0748 mutex_lock(&iwl_mei_mutex);
0749
0750 netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
0751 lockdep_is_held(&iwl_mei_mutex));
0752
0753 if (mei->amt_enabled == !!le32_to_cpu(dw->val))
0754 goto out;
0755
	mei->amt_enabled = !!le32_to_cpu(dw->val);
0757
0758 if (mei->amt_enabled) {
0759 if (netdev)
0760 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
0761
0762 iwl_mei_set_init_conf(mei);
0763 } else {
0764 if (iwl_mei_cache.ops)
0765 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
0766 if (netdev)
0767 netdev_rx_handler_unregister(netdev);
0768 }
0769
0770 out:
0771 mutex_unlock(&iwl_mei_mutex);
0772 rtnl_unlock();
0773 }
0774
0775 static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
0776 const struct iwl_sap_msg_dw *dw)
0777 {
0778 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0779
0780 mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
0781 }
0782
0783 static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
0784 const void *payload)
0785 {
0786
0787 if (iwl_mei_cache.ops)
0788 iwl_mei_send_sap_msg(cldev,
0789 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
0790 }
0791
0792 static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
0793 const void *payload)
0794 {
0795 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0796
0797 dev_info(&cldev->dev, "CSME takes ownership\n");
0798
0799 mei->got_ownership = false;
0800
0801
0802
0803
0804
0805 mei->csme_taking_ownership = true;
0806
0807 if (iwl_mei_cache.ops)
0808 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
0809 }
0810
0811 static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
0812 const struct iwl_sap_nvm *sap_nvm)
0813 {
0814 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0815 const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
0816 int i;
0817
0818 kfree(mei->nvm);
0819 mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
0820 if (!mei->nvm)
0821 return;
0822
0823 ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
0824 mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
0825 mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
0826 mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
0827 mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);
0828
0829 for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
0830 mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);
0831
0832 wake_up_all(&mei->get_nvm_wq);
0833 }
0834
0835 static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
0836 const struct iwl_sap_msg_dw *dw)
0837 {
0838 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
0839
0840
0841
0842
0843
0844 if (!dw->val) {
0845 dev_info(&cldev->dev, "Ownership req denied\n");
0846 return;
0847 }
0848
0849 mei->got_ownership = true;
0850 wake_up_all(&mei->get_ownership_wq);
0851
0852 iwl_mei_send_sap_msg(cldev,
0853 SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);
0854
0855
0856 if (iwl_mei_cache.ops)
0857 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
0858 }
0859
0860 static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
0861 const struct iwl_sap_hdr *hdr)
0862 {
0863 iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
0864 }
0865
0866 static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
0867 const struct iwl_sap_hdr *hdr)
0868 {
0869 u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
0870 u16 type = le16_to_cpu(hdr->type);
0871
0872 dev_dbg(&cldev->dev,
0873 "Got a new SAP message: type %d, len %d, seq %d\n",
0874 le16_to_cpu(hdr->type), len,
0875 le32_to_cpu(hdr->seq_num));
0876
0877 #define SAP_MSG_HANDLER(_cmd, _handler, _sz) \
0878 case SAP_MSG_NOTIF_ ## _cmd: \
0879 if (len < _sz) { \
0880 dev_err(&cldev->dev, \
0881 "Bad size for %d: %u < %u\n", \
0882 le16_to_cpu(hdr->type), \
0883 (unsigned int)len, \
0884 (unsigned int)_sz); \
0885 break; \
0886 } \
0887 mutex_lock(&iwl_mei_mutex); \
0888 _handler(cldev, (const void *)hdr); \
0889 mutex_unlock(&iwl_mei_mutex); \
0890 break
0891
0892 #define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz) \
0893 case SAP_MSG_NOTIF_ ## _cmd: \
0894 if (len < _sz) { \
0895 dev_err(&cldev->dev, \
0896 "Bad size for %d: %u < %u\n", \
0897 le16_to_cpu(hdr->type), \
0898 (unsigned int)len, \
0899 (unsigned int)_sz); \
0900 break; \
0901 } \
0902 _handler(cldev, (const void *)hdr); \
0903 break
0904
0905 #define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz) \
0906 case SAP_MSG_NOTIF_ ## _cmd: \
0907 if (len < _sz) { \
0908 dev_err(&cldev->dev, \
0909 "Bad size for %d: %u < %u\n", \
0910 le16_to_cpu(hdr->type), \
0911 (unsigned int)len, \
0912 (unsigned int)_sz); \
0913 break; \
0914 } \
0915 break
0916
0917 switch (type) {
0918 SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
0919 SAP_MSG_HANDLER(CSME_FILTERS,
0920 iwl_mei_handle_csme_filters,
0921 sizeof(struct iwl_sap_csme_filters));
0922 SAP_MSG_HANDLER(CSME_CONN_STATUS,
0923 iwl_mei_handle_conn_status,
0924 sizeof(struct iwl_sap_notif_conn_status));
0925 SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
0926 iwl_mei_handle_amt_state,
0927 sizeof(struct iwl_sap_msg_dw));
0928 SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
0929 SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
0930 sizeof(struct iwl_sap_nvm));
0931 SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
0932 iwl_mei_handle_rx_host_own_req,
0933 sizeof(struct iwl_sap_msg_dw));
0934 SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
0935 sizeof(struct iwl_sap_msg_dw));
0936 SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
0937 iwl_mei_handle_can_release_ownership, 0);
0938 SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
0939 iwl_mei_handle_csme_taking_ownership, 0);
0940 default:
0941
0942
0943
0944
0945
0946 dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
0947 le16_to_cpu(hdr->type), len);
0948 }
0949
#undef SAP_MSG_HANDLER
#undef SAP_MSG_HANDLER_NO_LOCK
#undef SAP_MSG_HANDLER_NO_HANDLER
0952 }
0953
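/*
 * Read @len bytes from a cyclic queue into @_buf, starting at *@_rd and
 * wrapping around at @q_sz; *@_rd is updated to the new read offset.
 * @wr is currently unused and kept for symmetry with the writer.
 */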
0954 static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
0955 u32 *_rd, u32 wr,
0956 void *_buf, u32 len)
0957 {
0958 u8 *buf = _buf;
0959 u32 rd = *_rd;
0960
0961 if (rd + len <= q_sz) {
0962 memcpy(buf, q_head + rd, len);
0963 rd += len;
0964 } else {
0965 memcpy(buf, q_head + rd, q_sz - rd);
0966 memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
0967 rd = len - (q_sz - rd);
0968 }
0969
0970 *_rd = rd;
0971 }
0972
0973 #define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \
0974 IEEE80211_TKIP_IV_LEN + \
0975 sizeof(rfc1042_header) + ETH_TLEN)
0976
0977 static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
0978 const u8 *q_head, u32 q_sz,
0979 u32 rd, u32 wr, ssize_t valid_rx_sz,
0980 struct sk_buff_head *tx_skbs)
0981 {
0982 struct iwl_sap_hdr hdr;
0983 struct net_device *netdev =
0984 rcu_dereference_protected(iwl_mei_cache.netdev,
0985 lockdep_is_held(&iwl_mei_mutex));
0986
0987 if (!netdev)
0988 return;
0989
0990 while (valid_rx_sz >= sizeof(hdr)) {
0991 struct ethhdr *ethhdr;
0992 unsigned char *data;
0993 struct sk_buff *skb;
0994 u16 len;
0995
0996 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
0997 valid_rx_sz -= sizeof(hdr);
0998 len = le16_to_cpu(hdr.len);
0999
1000 if (valid_rx_sz < len) {
1001 dev_err(&cldev->dev,
1002 "Data queue is corrupted: valid data len %zd, len %d\n",
1003 valid_rx_sz, len);
1004 break;
1005 }
1006
		if (len < sizeof(*ethhdr)) {
			dev_err(&cldev->dev,
				"Data len is smaller than an ethernet header? len = %d\n",
				len);
			/* Can't even hold an ethernet header, the queue is corrupted */
			break;
		}
1012
1013 valid_rx_sz -= len;
1014
		if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
			dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
				le16_to_cpu(hdr.type), len);
			/* Skip the payload so that the next header is parsed correctly */
			rd = (rd + len) % q_sz;
			continue;
		}
1020
1021
1022 skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
1023 if (!skb)
1024 continue;
1025
1026 skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
1027 ethhdr = skb_push(skb, sizeof(*ethhdr));
1028
1029 iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
1030 ethhdr, sizeof(*ethhdr));
1031 len -= sizeof(*ethhdr);
1032
1033 skb_reset_mac_header(skb);
1034 skb_reset_network_header(skb);
1035 skb->protocol = ethhdr->h_proto;
1036
1037 data = skb_put(skb, len);
1038 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);
1039
1040
1041
1042
1043
1044
1045
1046
1047 __skb_queue_tail(tx_skbs, skb);
1048 }
1049 }
1050
1051 static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
1052 const u8 *q_head, u32 q_sz,
1053 u32 rd, u32 wr, ssize_t valid_rx_sz)
1054 {
1055 struct page *p = alloc_page(GFP_KERNEL);
1056 struct iwl_sap_hdr *hdr;
1057
1058 if (!p)
1059 return;
1060
1061 hdr = page_address(p);
1062
1063 while (valid_rx_sz >= sizeof(*hdr)) {
1064 u16 len;
1065
1066 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
1067 valid_rx_sz -= sizeof(*hdr);
1068 len = le16_to_cpu(hdr->len);
1069
1070 if (valid_rx_sz < len)
1071 break;
1072
1073 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);
1074
1075 trace_iwlmei_sap_cmd(hdr, false);
1076 iwl_mei_handle_sap_msg(cldev, hdr);
1077 valid_rx_sz -= len;
1078 }
1079
1080
1081 if (valid_rx_sz)
1082 dev_err(&cldev->dev,
1083 "More data in the buffer although we read it all\n");
1084
1085 __free_page(p);
1086 }
1087
1088 static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
1089 struct iwl_sap_q_ctrl_blk *notif_q,
1090 const u8 *q_head,
1091 struct sk_buff_head *skbs,
1092 u32 q_sz)
1093 {
1094 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
1095 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
1096 ssize_t valid_rx_sz;
1097
1098 if (rd > q_sz || wr > q_sz) {
1099 dev_err(&cldev->dev,
1100 "Pointers are past the buffer limit\n");
1101 return;
1102 }
1103
1104 if (rd == wr)
1105 return;
1106
1107 valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;
1108
1109 if (skbs)
1110 iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
1111 valid_rx_sz, skbs);
1112 else
1113 iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
1114 valid_rx_sz);
1115
1116
1117 WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
1118 }
1119
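/*
 * CSME rang our doorbell (SAP_ME_MSG_CHECK_SHARED_AREA): drain both
 * CSME-to-host queues. Notifications are handled in place; data packets are
 * rebuilt as skbs and transmitted on the netdev on behalf of CSME.
 */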
1120 static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
1121 {
1122 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1123 struct iwl_sap_q_ctrl_blk *notif_q;
1124 struct sk_buff_head tx_skbs;
1125 struct iwl_sap_dir *dir;
1126 void *q_head;
1127 u32 q_sz;
1128
1129 if (!mei->shared_mem.ctrl)
1130 return;
1131
1132 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1133 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
1134 q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1135 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
1136
1137
1138
1139
1140
1141
1142 iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);
1143
1144 mutex_lock(&iwl_mei_mutex);
1145 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
1146 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
1147 q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1148 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
1149
1150 __skb_queue_head_init(&tx_skbs);
1151
1152 iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);
1153
1154 if (skb_queue_empty(&tx_skbs)) {
1155 mutex_unlock(&iwl_mei_mutex);
1156 return;
1157 }
1158
1159
1160
1161
1162
1163
1164
1165
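	/*
	 * Take the RCU read lock before dropping the mutex so that the
	 * netdev cached in iwl_mei_cache (RCU protected) can't go away
	 * while the skbs below are being transmitted.
	 */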
1166 rcu_read_lock();
1167
1168 mutex_unlock(&iwl_mei_mutex);
1169
1170 if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
1171 dev_err(&cldev->dev, "Can't Tx without a netdev\n");
1172 skb_queue_purge(&tx_skbs);
1173 goto out;
1174 }
1175
1176 while (!skb_queue_empty(&tx_skbs)) {
1177 struct sk_buff *skb = __skb_dequeue(&tx_skbs);
1178
1179 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
1180 dev_queue_xmit(skb);
1181 }
1182
1183 out:
1184 rcu_read_unlock();
1185 }
1186
1187 static void iwl_mei_rx(struct mei_cl_device *cldev)
1188 {
1189 struct iwl_sap_me_msg_hdr *hdr;
1190 u8 msg[100];
1191 ssize_t ret;
1192
1193 ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
1194 if (ret < 0) {
1195 dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
1196 return;
1197 }
1198
1199 if (ret == 0) {
1200 dev_err(&cldev->dev, "got an empty response\n");
1201 return;
1202 }
1203
1204 hdr = (void *)msg;
1205 trace_iwlmei_me_msg(hdr, false);
1206
1207 switch (le32_to_cpu(hdr->type)) {
1208 case SAP_ME_MSG_START_OK:
1209 BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
1210 sizeof(msg));
1211
1212 iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
1213 break;
1214 case SAP_ME_MSG_CHECK_SHARED_AREA:
1215 iwl_mei_handle_check_shared_area(cldev);
1216 break;
1217 default:
		dev_err(&cldev->dev, "got an unexpected Rx notification: %d\n",
1219 le32_to_cpu(hdr->type));
1220 break;
1221 }
1222 }
1223
1224 static int iwl_mei_send_start(struct mei_cl_device *cldev)
1225 {
1226 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1227 struct iwl_sap_me_msg_start msg = {
1228 .hdr.type = cpu_to_le32(SAP_ME_MSG_START),
1229 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
1230 .hdr.len = cpu_to_le32(sizeof(msg)),
1231 .supported_versions[0] = SAP_VERSION,
1232 .init_data_seq_num = cpu_to_le16(0x100),
1233 .init_notif_seq_num = cpu_to_le16(0x800),
1234 };
1235 int ret;
1236
1237 trace_iwlmei_me_msg(&msg.hdr, true);
1238 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
1239 if (ret != sizeof(msg)) {
1240 dev_err(&cldev->dev,
1241 "failed to send the SAP_ME_MSG_START message %d\n",
1242 ret);
1243 return ret;
1244 }
1245
1246 return 0;
1247 }
1248
1249 static int iwl_mei_enable(struct mei_cl_device *cldev)
1250 {
1251 int ret;
1252
1253 ret = mei_cldev_enable(cldev);
1254 if (ret < 0) {
1255 dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
1256 return ret;
1257 }
1258
1259 ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
1260 if (ret) {
1261 dev_err(&cldev->dev,
1262 "failed to register to the rx cb: %d\n", ret);
1263 mei_cldev_disable(cldev);
1264 return ret;
1265 }
1266
1267 return 0;
1268 }
1269
1270 struct iwl_mei_nvm *iwl_mei_get_nvm(void)
1271 {
1272 struct iwl_mei_nvm *nvm = NULL;
1273 struct iwl_mei *mei;
1274 int ret;
1275
1276 mutex_lock(&iwl_mei_mutex);
1277
1278 if (!iwl_mei_is_connected())
1279 goto out;
1280
1281 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1282
1283 if (!mei)
1284 goto out;
1285
1286 ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
1287 SAP_MSG_NOTIF_GET_NVM);
1288 if (ret)
1289 goto out;
1290
1291 mutex_unlock(&iwl_mei_mutex);
1292
1293 ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
1294 if (!ret)
1295 return NULL;
1296
1297 mutex_lock(&iwl_mei_mutex);
1298
1299 if (!iwl_mei_is_connected())
1300 goto out;
1301
1302 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1303
1304 if (!mei)
1305 goto out;
1306
1307 if (mei->nvm)
1308 nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);
1309
1310 out:
1311 mutex_unlock(&iwl_mei_mutex);
1312 return nvm;
1313 }
1314 EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
1315
1316 int iwl_mei_get_ownership(void)
1317 {
1318 struct iwl_mei *mei;
1319 int ret;
1320
1321 mutex_lock(&iwl_mei_mutex);
1322
1323
1324 if (!iwl_mei_is_connected()) {
1325 ret = 0;
1326 goto out;
1327 }
1328
1329 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1330
1331 if (!mei) {
1332 ret = -ENODEV;
1333 goto out;
1334 }
1335
1336 if (!mei->amt_enabled) {
1337 ret = 0;
1338 goto out;
1339 }
1340
1341 if (mei->got_ownership) {
1342 ret = 0;
1343 goto out;
1344 }
1345
1346 ret = iwl_mei_send_sap_msg(mei->cldev,
1347 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
1348 if (ret)
1349 goto out;
1350
1351 mutex_unlock(&iwl_mei_mutex);
1352
1353 ret = wait_event_timeout(mei->get_ownership_wq,
1354 mei->got_ownership, HZ / 2);
1355 if (!ret)
1356 return -ETIMEDOUT;
1357
1358 mutex_lock(&iwl_mei_mutex);
1359
1360
1361 if (!iwl_mei_is_connected()) {
1362 ret = 0;
1363 goto out;
1364 }
1365
1366 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1367
1368 if (!mei) {
1369 ret = -ENODEV;
1370 goto out;
1371 }
1372
1373 ret = !mei->got_ownership;
1374
1375 out:
1376 mutex_unlock(&iwl_mei_mutex);
1377 return ret;
1378 }
1379 EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);
1380
1381 void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
1382 const struct iwl_mei_colloc_info *colloc_info)
1383 {
1384 struct iwl_sap_notif_host_link_up msg = {
1385 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
1386 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1387 .conn_info = {
1388 .ssid_len = cpu_to_le32(conn_info->ssid_len),
1389 .channel = conn_info->channel,
1390 .band = conn_info->band,
1391 .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
1392 .auth_mode = cpu_to_le32(conn_info->auth_mode),
1393 },
1394 };
1395 struct iwl_mei *mei;
1396
1397 if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
1398 return;
1399
1400 memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
1401 memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);
1402
1403 if (colloc_info) {
1404 msg.colloc_channel = colloc_info->channel;
1405 msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
1406 memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
1407 }
1408
1409 mutex_lock(&iwl_mei_mutex);
1410
1411 if (!iwl_mei_is_connected())
1412 goto out;
1413
1414 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1415
1416 if (!mei)
1417 goto out;
1418
1419 if (!mei->amt_enabled)
1420 goto out;
1421
1422 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1423
1424 out:
1425 kfree(iwl_mei_cache.conn_info);
1426 iwl_mei_cache.conn_info =
1427 kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
1428 mutex_unlock(&iwl_mei_mutex);
1429 }
1430 EXPORT_SYMBOL_GPL(iwl_mei_host_associated);
1431
1432 void iwl_mei_host_disassociated(void)
1433 {
1434 struct iwl_mei *mei;
1435 struct iwl_sap_notif_host_link_down msg = {
1436 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
1437 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1438 .type = HOST_LINK_DOWN_TYPE_LONG,
1439 };
1440
1441 mutex_lock(&iwl_mei_mutex);
1442
1443 if (!iwl_mei_is_connected())
1444 goto out;
1445
1446 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1447
1448 if (!mei)
1449 goto out;
1450
1451 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1452
1453 out:
1454 kfree(iwl_mei_cache.conn_info);
1455 iwl_mei_cache.conn_info = NULL;
1456 mutex_unlock(&iwl_mei_mutex);
1457 }
1458 EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);
1459
1460 void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
1461 {
1462 struct iwl_mei *mei;
1463 u32 rfkill_state = 0;
1464 struct iwl_sap_msg_dw msg = {
1465 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
1466 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1467 };
1468
1469 if (!sw_rfkill)
1470 rfkill_state |= SAP_SW_RFKILL_DEASSERTED;
1471
1472 if (!hw_rfkill)
1473 rfkill_state |= SAP_HW_RFKILL_DEASSERTED;
1474
1475 mutex_lock(&iwl_mei_mutex);
1476
1477 if (!iwl_mei_is_connected())
1478 goto out;
1479
1480 msg.val = cpu_to_le32(rfkill_state);
1481
1482 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1483
1484 if (!mei)
1485 goto out;
1486
1487 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1488
1489 out:
1490 iwl_mei_cache.rf_kill = rfkill_state;
1491 mutex_unlock(&iwl_mei_mutex);
1492 }
1493 EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);
1494
1495 void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
1496 {
1497 struct iwl_mei *mei;
1498 struct iwl_sap_notif_host_nic_info msg = {
1499 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
1500 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1501 };
1502
1503 mutex_lock(&iwl_mei_mutex);
1504
1505 if (!iwl_mei_is_connected())
1506 goto out;
1507
1508 ether_addr_copy(msg.mac_address, mac_address);
1509 ether_addr_copy(msg.nvm_address, nvm_address);
1510
1511 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1512
1513 if (!mei)
1514 goto out;
1515
1516 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1517
1518 out:
1519 ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
1520 ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
1521 mutex_unlock(&iwl_mei_mutex);
1522 }
1523 EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);
1524
1525 void iwl_mei_set_country_code(u16 mcc)
1526 {
1527 struct iwl_mei *mei;
1528 struct iwl_sap_notif_country_code msg = {
1529 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
1530 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1531 .mcc = cpu_to_le16(mcc),
1532 };
1533
1534 mutex_lock(&iwl_mei_mutex);
1535
1536 if (!iwl_mei_is_connected())
1537 goto out;
1538
1539 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1540
1541 if (!mei)
1542 goto out;
1543
1544 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1545
1546 out:
1547 iwl_mei_cache.mcc = mcc;
1548 mutex_unlock(&iwl_mei_mutex);
1549 }
1550 EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);
1551
1552 void iwl_mei_set_power_limit(const __le16 *power_limit)
1553 {
1554 struct iwl_mei *mei;
1555 struct iwl_sap_notif_sar_limits msg = {
1556 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
1557 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
1558 };
1559
1560 mutex_lock(&iwl_mei_mutex);
1561
1562 if (!iwl_mei_is_connected())
1563 goto out;
1564
1565 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1566
1567 if (!mei)
1568 goto out;
1569
1570 memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
1571
1572 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1573
1574 out:
1575 kfree(iwl_mei_cache.power_limit);
1576 iwl_mei_cache.power_limit = kmemdup(power_limit,
1577 sizeof(msg.sar_chain_info_table), GFP_KERNEL);
1578 mutex_unlock(&iwl_mei_mutex);
1579 }
1580 EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);
1581
1582 void iwl_mei_set_netdev(struct net_device *netdev)
1583 {
1584 struct iwl_mei *mei;
1585
1586 mutex_lock(&iwl_mei_mutex);
1587
1588 if (!iwl_mei_is_connected()) {
1589 rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1590 goto out;
1591 }
1592
1593 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1594
1595 if (!mei)
1596 goto out;
1597
1598 if (!netdev) {
1599 struct net_device *dev =
1600 rcu_dereference_protected(iwl_mei_cache.netdev,
1601 lockdep_is_held(&iwl_mei_mutex));
1602
1603 if (!dev)
1604 goto out;
1605
1606 netdev_rx_handler_unregister(dev);
1607 }
1608
1609 rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1610
1611 if (netdev && mei->amt_enabled)
1612 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
1613
1614 out:
1615 mutex_unlock(&iwl_mei_mutex);
1616 }
1617 EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
1618
1619 void iwl_mei_device_down(void)
1620 {
1621 struct iwl_mei *mei;
1622
1623 mutex_lock(&iwl_mei_mutex);
1624
1625 if (!iwl_mei_is_connected())
1626 goto out;
1627
1628 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1629
1630 if (!mei)
1631 goto out;
1632
1633 if (!mei->csme_taking_ownership)
1634 goto out;
1635
1636 iwl_mei_send_sap_msg(mei->cldev,
1637 SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
1638 mei->csme_taking_ownership = false;
1639 out:
1640 mutex_unlock(&iwl_mei_mutex);
1641 }
1642 EXPORT_SYMBOL_GPL(iwl_mei_device_down);
1643
1644 int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
1645 {
1646 int ret;
1647
1648
1649
1650
1651
1652 if (!priv)
1653 return -EINVAL;
1654
1655 mutex_lock(&iwl_mei_mutex);
1656
1657
1658 if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
1659 ret = -EBUSY;
1660 goto out;
1661 }
1662
1663 iwl_mei_cache.priv = priv;
1664 iwl_mei_cache.ops = ops;
1665
1666 if (iwl_mei_global_cldev) {
1667 struct iwl_mei *mei =
1668 mei_cldev_get_drvdata(iwl_mei_global_cldev);
1669
1670
1671 if (iwl_mei_is_connected()) {
1672 iwl_mei_send_sap_msg(mei->cldev,
1673 SAP_MSG_NOTIF_WIFIDR_UP);
1674 ops->rfkill(priv, mei->link_prot_state);
1675 }
1676 }
1677 ret = 0;
1678
1679 out:
1680 mutex_unlock(&iwl_mei_mutex);
1681 return ret;
1682 }
1683 EXPORT_SYMBOL_GPL(iwl_mei_register);
1684
1685 void iwl_mei_start_unregister(void)
1686 {
1687 mutex_lock(&iwl_mei_mutex);
1688
1689
1690 if (rcu_access_pointer(iwl_mei_cache.netdev))
1691 pr_err("Still had a netdev pointer set upon unregister\n");
1692
1693 kfree(iwl_mei_cache.conn_info);
1694 iwl_mei_cache.conn_info = NULL;
1695 kfree(iwl_mei_cache.power_limit);
1696 iwl_mei_cache.power_limit = NULL;
1697 iwl_mei_cache.ops = NULL;
1698
1699
1700 mutex_unlock(&iwl_mei_mutex);
1701 }
1702 EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);
1703
1704 void iwl_mei_unregister_complete(void)
1705 {
1706 mutex_lock(&iwl_mei_mutex);
1707
1708 iwl_mei_cache.priv = NULL;
1709
1710 if (iwl_mei_global_cldev) {
1711 struct iwl_mei *mei =
1712 mei_cldev_get_drvdata(iwl_mei_global_cldev);
1713
1714 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
1715 mei->got_ownership = false;
1716 }
1717
1718 mutex_unlock(&iwl_mei_mutex);
1719 }
1720 EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
1721
1722 #if IS_ENABLED(CONFIG_DEBUG_FS)
1723
1724 static ssize_t
1725 iwl_mei_dbgfs_send_start_message_write(struct file *file,
1726 const char __user *user_buf,
1727 size_t count, loff_t *ppos)
1728 {
1729 int ret;
1730
1731 mutex_lock(&iwl_mei_mutex);
1732
1733 if (!iwl_mei_global_cldev) {
1734 ret = -ENODEV;
1735 goto out;
1736 }
1737
1738 ret = iwl_mei_send_start(iwl_mei_global_cldev);
1739
1740 out:
1741 mutex_unlock(&iwl_mei_mutex);
1742 return ret ?: count;
1743 }
1744
1745 static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
1746 .write = iwl_mei_dbgfs_send_start_message_write,
1747 .open = simple_open,
1748 .llseek = default_llseek,
1749 };
1750
1751 static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
1752 const char __user *user_buf,
1753 size_t count, loff_t *ppos)
1754 {
1755 iwl_mei_get_ownership();
1756
1757 return count;
1758 }
1759
1760 static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
1761 .write = iwl_mei_dbgfs_req_ownership_write,
1762 .open = simple_open,
1763 .llseek = default_llseek,
1764 };
1765
1766 static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
1767 {
1768 mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1769
1770 if (!mei->dbgfs_dir)
1771 return;
1772
1773 debugfs_create_ulong("status", S_IRUSR,
1774 mei->dbgfs_dir, &iwl_mei_status);
1775 debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
1776 mei, &iwl_mei_dbgfs_send_start_message_ops);
1777 debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
1778 mei, &iwl_mei_dbgfs_req_ownership_ops);
1779 }
1780
1781 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
1782 {
1783 debugfs_remove_recursive(mei->dbgfs_dir);
1784 mei->dbgfs_dir = NULL;
1785 }
1786
1787 #else
1788
1789 static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
1790 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
1791
1792 #endif
1793
1794 #define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
1795
1796
1797
1798
1799
1800
1801
1802
1803
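/*
 * Mapping the shared area can fail transiently, presumably because CSME may
 * still hold the DMA buffer from a previous driver instance (an assumption
 * based on the retry loop below), so the allocation is retried a few times
 * with a short delay before giving up.
 */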
1804 static int iwl_mei_probe(struct mei_cl_device *cldev,
1805 const struct mei_cl_device_id *id)
1806 {
1807 int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
1808 struct iwl_mei *mei;
1809 int ret;
1810
1811 mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
1812 if (!mei)
1813 return -ENOMEM;
1814
1815 init_waitqueue_head(&mei->get_nvm_wq);
1816 INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
1817 INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
1818 iwl_mei_csa_throttle_end_wk);
1819 init_waitqueue_head(&mei->get_ownership_wq);
1820 spin_lock_init(&mei->data_q_lock);
1821
1822 mei_cldev_set_drvdata(cldev, mei);
1823 mei->cldev = cldev;
1824
1825 do {
1826 ret = iwl_mei_alloc_shared_mem(cldev);
1827 if (!ret)
1828 break;
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838 dev_dbg(&cldev->dev,
1839 "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
1840 ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
1841 msleep(100);
1842 alloc_retry--;
1843 } while (alloc_retry);
1844
1845 if (ret) {
1846 dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
1847 ret);
1848 goto free;
1849 }
1850
1851 iwl_mei_init_shared_mem(mei);
1852
1853 ret = iwl_mei_enable(cldev);
1854 if (ret)
1855 goto free_shared_mem;
1856
1857 iwl_mei_dbgfs_register(mei);
1858
1859
1860
1861
1862
1863 mutex_lock(&iwl_mei_mutex);
1864 ret = iwl_mei_send_start(cldev);
1865 mutex_unlock(&iwl_mei_mutex);
1866 if (ret)
1867 goto debugfs_unregister;
1868
1869
1870 iwl_mei_global_cldev = cldev;
1871
1872 return 0;
1873
1874 debugfs_unregister:
1875 iwl_mei_dbgfs_unregister(mei);
1876 mei_cldev_disable(cldev);
1877 free_shared_mem:
1878 iwl_mei_free_shared_mem(cldev);
1879 free:
1880 mei_cldev_set_drvdata(cldev, NULL);
1881 devm_kfree(&cldev->dev, mei);
1882
1883 return ret;
1884 }
1885
1886 #define SEND_SAP_MAX_WAIT_ITERATION 10
1887
1888 static void iwl_mei_remove(struct mei_cl_device *cldev)
1889 {
1890 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1891 int i;
1892
1893
1894
1895
1896
1897 if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops)
1898 iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
1899
1900 if (rcu_access_pointer(iwl_mei_cache.netdev)) {
1901 struct net_device *dev;
1902
1903
1904
1905
1906
1907 rtnl_lock();
1908 mutex_lock(&iwl_mei_mutex);
1909
1910
1911
1912
1913
1914 dev = rcu_dereference_protected(iwl_mei_cache.netdev,
1915 lockdep_is_held(&iwl_mei_mutex));
1916
1917 netdev_rx_handler_unregister(dev);
1918 mutex_unlock(&iwl_mei_mutex);
1919 rtnl_unlock();
1920 }
1921
1922 mutex_lock(&iwl_mei_mutex);
1923
1924
1925
1926
1927
1928 mei->csa_throttled = false;
1929 iwl_mei_send_sap_msg(mei->cldev,
1930 SAP_MSG_NOTIF_HOST_GOES_DOWN);
1931
1932 for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
1933 if (!iwl_mei_host_to_me_data_pending(mei))
1934 break;
1935
1936 msleep(5);
1937 }
1938
1939
1940
1941
1942
1943
1944 if (i == SEND_SAP_MAX_WAIT_ITERATION)
1945 dev_err(&mei->cldev->dev,
1946 "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
1947
1948 mutex_unlock(&iwl_mei_mutex);
1949
1950
1951
1952
1953
1954
1955
1956
1957 spin_lock_bh(&mei->data_q_lock);
1958 clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
1959 spin_unlock_bh(&mei->data_q_lock);
1960
1961 if (iwl_mei_cache.ops)
1962 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
1963
1964
1965
1966
1967
1968
1969
1970 mei_cldev_disable(cldev);
1971
1972
1973
1974
1975
1976
1977 cancel_work_sync(&mei->send_csa_msg_wk);
1978 cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
1979
1980
1981
1982
1983
1984
1985 wake_up_all(&mei->get_ownership_wq);
1986
1987 mutex_lock(&iwl_mei_mutex);
1988
1989 iwl_mei_global_cldev = NULL;
1990
1991 wake_up_all(&mei->get_nvm_wq);
1992
1993 iwl_mei_free_shared_mem(cldev);
1994
1995 iwl_mei_dbgfs_unregister(mei);
1996
1997 mei_cldev_set_drvdata(cldev, NULL);
1998
1999 kfree(mei->nvm);
2000
2001 kfree(rcu_access_pointer(mei->filters));
2002
2003 devm_kfree(&cldev->dev, mei);
2004
2005 mutex_unlock(&iwl_mei_mutex);
2006 }
2007
2008 static const struct mei_cl_device_id iwl_mei_tbl[] = {
2009 {
2010 .name = KBUILD_MODNAME,
2011 .uuid = MEI_WLAN_UUID,
2012 .version = MEI_CL_VERSION_ANY,
2013 },
2014
2015
2016 { }
2017 };
2018
2019
2020
2021
2022
2023
2024 static struct mei_cl_driver iwl_mei_cl_driver = {
2025 .id_table = iwl_mei_tbl,
2026 .name = KBUILD_MODNAME,
2027 .probe = iwl_mei_probe,
2028 .remove = iwl_mei_remove,
2029 };
2030
2031 module_mei_cl_driver(iwl_mei_cl_driver);