0001
0002
0003
0004
0005
0006
0007 #ifndef __iwl_trans_h__
0008 #define __iwl_trans_h__
0009
0010 #include <linux/ieee80211.h>
0011 #include <linux/mm.h> /* for page_address */
0012 #include <linux/lockdep.h>
0013 #include <linux/kernel.h>
0014
0015 #include "iwl-debug.h"
0016 #include "iwl-config.h"
0017 #include "fw/img.h"
0018 #include "iwl-op-mode.h"
0019 #include <linux/firmware.h>
0020 #include "fw/api/cmdhdr.h"
0021 #include "fw/api/txq.h"
0022 #include "fw/api/dbg-tlv.h"
0023 #include "iwl-dbg-tlv.h"
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
/* default FW debug domain when none is configured */
#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

/*
 * Layout of the len_n_flags word in struct iwl_rx_packet (below):
 * frame length in the low bits, flags and RX queue index above it.
 */
#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF	/* bits 0-13: frame length */
#define FH_RSCSR_FRAME_INVALID	0x55550000
#define FH_RSCSR_FRAME_ALIGN	0x40
#define FH_RSCSR_RPA_EN		BIT(25)
#define FH_RSCSR_RADA_EN	BIT(26)
#define FH_RSCSR_RXQ_POS	16		/* bits 16-21: RX queue index */
#define FH_RSCSR_RXQ_MASK	0x3F0000
0068
/**
 * struct iwl_rx_packet - RX packet as delivered by the device
 * @len_n_flags: frame length (FH_RSCSR_FRAME_SIZE_MSK) combined with
 *	the FH_RSCSR_* flag/queue bits defined above
 * @hdr: command header
 * @data: variable-length payload following the header
 */
struct iwl_rx_packet {
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

/* total packet length in bytes, extracted from len_n_flags */
static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

/* payload length only, i.e. total length minus the command header */
static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
/**
 * enum CMD_MODE - how to send host commands
 * @CMD_ASYNC: don't wait for the response
 * @CMD_WANT_SKB: keep the response buffer for the caller, which must
 *	release it with iwl_free_resp() when done
 * @CMD_SEND_IN_RFKILL: send the command even when RF-kill is asserted
 * @CMD_WANT_ASYNC_CALLBACK: request a callback for an async command
 *	(NOTE(review): exact semantics set by the op mode — confirm there)
 * @CMD_SEND_IN_D3: allow sending while the device is in D3
 */
enum CMD_MODE {
	CMD_ASYNC = BIT(0),
	CMD_WANT_SKB = BIT(1),
	CMD_SEND_IN_RFKILL = BIT(2),
	CMD_WANT_ASYNC_CALLBACK = BIT(3),
	CMD_SEND_IN_D3 = BIT(4),
};

/* default payload size of a host command, chosen so that
 * struct iwl_device_cmd fits nicely in allocation/DMA terms */
#define DEF_CMD_PAYLOAD_SIZE 320
0123
0124
0125
0126
0127
0128
0129
0130
/**
 * struct iwl_device_cmd - a host command as sent to the device
 *
 * Either a normal header + payload, or a wide header + a payload
 * shrunk by the header size difference so the overall size is the
 * same in both layouts.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;
0145
0146
0147
0148
0149
0150
0151
0152
/**
 * struct iwl_device_tx_cmd - a TX command as sent to the device
 * @hdr: command header
 * @payload: variable-length TX command payload
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

/* maximum TFD payload: bounded by the fixed-size device command */
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/* number of transfer buffers (chunks) per host command TFD */
#define IWL_MAX_CMD_TBS_PER_TFD 2

/*
 * Maximum number of fragments the transport can handle per frame;
 * reserves TBs out of the per-TFD maximum (see txqs.tfd.max_tbs).
 * NOTE(review): the reason for reserving exactly 3 TBs is not visible
 * in this file — presumably header/trailer TBs; confirm in the TX path.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
/**
 * enum iwl_hcmd_dataflag - per-chunk flags for host command data
 * @IWL_HCMD_DFL_NOCOPY: use the chunk in place, don't copy it
 * @IWL_HCMD_DFL_DUP: duplicate the chunk into a new buffer
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY = BIT(0),
	IWL_HCMD_DFL_DUP = BIT(1),
};

/* bitmap of which firmware error event tables were announced by TLVs */
enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
/**
 * struct iwl_host_cmd - host command to the uCode
 * @data: chunks composing the command payload
 * @resp_pkt: response packet, valid if %CMD_WANT_SKB was set
 * @_rx_page_addr: internal — page address backing @resp_pkt
 * @_rx_page_order: internal — allocation order of that page
 * @flags: CMD_* flags (see enum CMD_MODE)
 * @id: command id
 * @len: length of each chunk in @data
 * @dataflags: IWL_HCMD_DFL_* flags for each chunk in @data
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

/* release the response page of a %CMD_WANT_SKB command */
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
0231
/* page-backed RX buffer handed to the op mode; fields prefixed with
 * '_' are internal to the transport */
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;		/* offset of the packet within the page */
	bool _page_stolen;	/* true once rxb_steal_page() was called */
	u32 _rx_page_order;
	unsigned int truesize;
};

/* address of the packet data inside the buffer's page */
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

/* take a reference on the page and mark it stolen so the transport
 * won't recycle it; caller becomes responsible for the extra ref */
static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

/* free the page backing the RX buffer */
static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
0261
/* maximum number of commands the op mode may mark as no-reclaim */
#define MAX_NO_RECLAIM_CMDS	6

/* bitmask covering bits lo..hi inclusive */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Queue/TID limits. TVQM queues apply to newer devices with the
 * virtual queue manager; the HW-queue limit applies to older ones.
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6
0278
0279
0280
0281
0282
0283
/**
 * enum iwl_d3_status - device status after D3 resume
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
/**
 * enum iwl_trans_status - bit numbers for the transport status bitmap
 * @STATUS_SYNC_HCMD_ACTIVE: a synchronous host command is in flight
 * @STATUS_DEVICE_ENABLED: the device is switched on
 * @STATUS_TPOWER_PMI: the device is in PMI state (see set_pmi op)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: hardware RF-kill is asserted
 * @STATUS_RFKILL_OPMODE: RF-kill as reported to the op mode
 * @STATUS_FW_ERROR: the firmware hit an error; TX is rejected
 *	(see iwl_trans_tx()) until restart
 * @STATUS_TRANS_GOING_IDLE: transport is transitioning to idle
 * @STATUS_TRANS_IDLE: transport is idle
 * @STATUS_TRANS_DEAD: transport is unusable
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress the next command-error
 *	report once (NOTE(review): consumer not visible in this file)
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
};
0318
0319 static inline int
0320 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
0321 {
0322 switch (rb_size) {
0323 case IWL_AMSDU_2K:
0324 return get_order(2 * 1024);
0325 case IWL_AMSDU_4K:
0326 return get_order(4 * 1024);
0327 case IWL_AMSDU_8K:
0328 return get_order(8 * 1024);
0329 case IWL_AMSDU_12K:
0330 return get_order(16 * 1024);
0331 default:
0332 WARN_ON(1);
0333 return -1;
0334 }
0335 }
0336
0337 static inline int
0338 iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
0339 {
0340 switch (rb_size) {
0341 case IWL_AMSDU_2K:
0342 return 2 * 1024;
0343 case IWL_AMSDU_4K:
0344 return 4 * 1024;
0345 case IWL_AMSDU_8K:
0346 return 8 * 1024;
0347 case IWL_AMSDU_12K:
0348 return 16 * 1024;
0349 default:
0350 WARN_ON(1);
0351 return 0;
0352 }
0353 }
0354
/**
 * struct iwl_hcmd_names - maps a command id to its printable name
 * @cmd_id: command id
 * @cmd_name: human-readable name, usually the identifier itself
 */
struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

/* stringify a command constant into an iwl_hcmd_names entry */
#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

/* one command-name array per command group */
struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
0370
0371
0372
0373
0374
0375
0376
0377
0378
/**
 * struct iwl_dump_sanitize_ops - callbacks to scrub sensitive data
 *	out of firmware dumps before they are stored
 * @frob_txf: scrub a TX FIFO buffer
 * @frob_hcmd: scrub a host command buffer
 * @frob_mem: scrub a memory region read from @mem_addr
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
/**
 * struct iwl_trans_config - transport configuration from the op mode
 * @op_mode: pointer to the upper layer (set into trans->op_mode by
 *	iwl_trans_configure())
 * @cmd_queue: queue id used for host commands
 * @cmd_fifo: FIFO used for host commands
 * @cmd_q_wdg_timeout: watchdog timeout for the command queue
 * @no_reclaim_cmds: command ids whose responses must not be reclaimed
 *	(at most %MAX_NO_RECLAIM_CMDS)
 * @n_no_reclaim_cmds: number of entries in @no_reclaim_cmds
 * @rx_buf_size: RX buffer size (see enum iwl_amsdu_size)
 * @bc_table_dword: byte-count table uses dword-sized entries —
 *	TODO confirm exact meaning against the PCIe TX code
 * @scd_set_active: scheduler activation handled by the transport —
 *	TODO confirm against the PCIe code
 * @command_groups: command-name arrays for debug printing; must be
 *	sorted (verified by iwl_cmd_groups_verify_sorted())
 * @command_groups_size: number of entries in @command_groups
 * @cb_data_offs: offset into skb->cb the transport may use
 * @fw_reset_handshake: perform a handshake with the FW on reset
 * @queue_alloc_cmd_ver: version of the queue-allocation command
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
	u8 queue_alloc_cmd_ver;
};

/* dump buffer returned by the dump_data op: @len bytes in @data */
struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};
0437
0438 struct iwl_trans;
0439
/**
 * struct iwl_trans_txq_scd_cfg - scheduler config for a TX queue
 * @fifo: FIFO the queue is mapped to
 * @sta_id: station id
 * @tid: traffic identifier
 * @aggregate: queue is used for aggregation
 * @frame_limit: scheduler frame limit for this queue
 */
struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - DMA addresses/values for one RX queue,
 *	as reported by the rxq_dma_data op for passing to the firmware
 * @fr_bd_cb: free buffer descriptor circular buffer address
 * @fr_bd_wid: free buffer descriptor write index — TODO confirm
 * @urbd_stts_wrptr: used RBD / status write pointer address
 * @ur_bd_cb: used RBD circular buffer address
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
/**
 * struct iwl_trans_ops - transport-specific operations
 *
 * Function pointers implemented by each transport (e.g. PCIe). Which
 * ops are optional (NULL-checked) and which may sleep is documented
 * by the iwl_trans_*() wrappers below — wrappers that call
 * might_sleep() indicate sleeping ops, wrappers that check the
 * pointer indicate optional ops.
 */
struct iwl_trans_ops {

	/* device lifecycle */
	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	/* D3 (WoWLAN) suspend/resume — optional */
	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	/* data path */
	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	/* TX queue management */
	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* dynamic (TVQM) queue allocation — optional */
	int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
			 u32 sta_mask, u8 tid,
			 int size, unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	/* register / memory access */
	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
	bool (*grab_nic_access)(struct iwl_trans *trans);
	void (*release_nic_access)(struct iwl_trans *trans);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);

	/* debugging / firmware data */
	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask,
			const struct iwl_dump_sanitize_ops *sanitize_ops,
			void *sanitize_ctx);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
	int (*set_reduce_power)(struct iwl_trans *trans,
				const void *data, u32 len);
	void (*interrupts)(struct iwl_trans *trans, bool enable);
	int (*imr_dma_data)(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr,
			    u32 byte_cnt);

};
0626
0627
0628
0629
0630
0631
0632
0633
/**
 * enum iwl_trans_state - state of the transport layer
 * @IWL_TRANS_NO_FW: no firmware is running
 * @IWL_TRANS_FW_STARTED: firmware was started (iwl_trans_start_fw()
 *	succeeded) but no alive notification was processed yet
 * @IWL_TRANS_FW_ALIVE: firmware is alive; data-path ops are allowed
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
/**
 * enum iwl_plat_pm_mode - platform power-management behavior
 * @IWL_PLAT_PM_MODE_DISABLED: device brought fully down on suspend
 * @IWL_PLAT_PM_MODE_D3: device kept in D3 across suspend (WoWLAN)
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state - load state of an INI debug configuration
 * @IWL_INI_CFG_STATE_NOT_LOADED: no configuration was loaded
 * @IWL_INI_CFG_STATE_LOADED: configuration loaded successfully
 * @IWL_INI_CFG_STATE_CORRUPTED: configuration found but unusable
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* how long to wait for an NMI to be acknowledged (250 ms) */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
0694
0695
0696
0697
0698
0699
0700
/**
 * struct iwl_dram_data - DMA-coherent DRAM block descriptor
 * @physical: DMA address of the block
 * @block: CPU virtual address of the block
 * @size: size in bytes
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - firmware monitor (debug buffer) fragments
 * @num_frags: number of entries in @frags
 * @frags: array of DRAM fragments backing the monitor
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - DRAM given to the device for
 *	self-initialization (context-info based devices)
 * @fw: firmware section blocks
 * @fw_cnt: number of entries in @fw
 * @paging: paging blocks
 * @paging_cnt: number of entries in @paging
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
/**
 * struct iwl_imr_data - bookkeeping for IMR (isolated memory region)
 *	DMA transfers (see the imr_dma_data op)
 * @imr_enable: IMR enable flag/status
 * @imr_size: total IMR size
 * @sram_addr: SRAM source address
 * @sram_size: SRAM region size
 * @imr2sram_remainbyte: bytes remaining in the current IMR->SRAM copy
 * @imr_curr_addr: current IMR address in an ongoing transfer
 * @imr_base_addr: IMR base address (little-endian, device format)
 */
struct iwl_imr_data {
	u32 imr_enable;
	u32 imr_size;
	u32 sram_addr;
	u32 sram_size;
	u32 imr2sram_remainbyte;
	u64 imr_curr_addr;
	__le64 imr_base_addr;
};
0750
0751
0752
0753
0754
0755
0756
0757
0758
0759
0760
0761
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
/**
 * struct iwl_trans_debug - transport debug-related data
 * @n_dest_reg: number of registers in the destination TLV
 * @rec_on: recording is enabled
 * @dest_tlv: debug destination TLV (legacy debug)
 * @conf_tlv: debug configuration TLVs, one per FW_DBG_CONF_* slot
 * @trigger_tlv: debug trigger TLVs
 * @lmac_error_event_table: LMAC error table addresses (up to 2 LMACs)
 * @umac_error_event_table: UMAC error table address
 * @tcm_error_event_table: TCM error table addresses
 * @rcm_error_event_table: RCM error table addresses
 * @error_event_table_tlv_status: bitmap of which tables were set
 *	(see enum iwl_error_event_table_status)
 * @internal_ini_cfg: state of the driver-embedded INI debug config
 * @external_ini_cfg: state of the externally loaded INI debug config
 * @fw_mon_cfg: INI monitor allocation config per allocation id
 * @fw_mon_ini: INI monitor fragments per allocation id
 * @fw_mon: legacy (non-INI) firmware monitor buffer
 * @hw_error: a hardware error occurred
 * @ini_dest: INI monitor destination (buffer location)
 * @unsupported_region_msk: bitmap of unsupported dump regions
 * @active_regions: active INI region TLVs by region id
 * @debug_info_tlv_list: list of debug-info TLVs
 * @time_point: per-time-point trigger/data lists
 * @periodic_trig_list: list of periodic triggers
 * @domains_bitmap: enabled debug domains
 * @ucode_preset: preset requested by the ucode
 * @restart_required: a firmware restart is required
 * @last_tp_resetfw: reset-FW setting of the last time point
 * @imr_data: IMR transfer bookkeeping
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
	struct iwl_imr_data imr_data;
};

/* a DMA-mapped buffer: CPU address, DMA address and size */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/* per-command metadata kept by the transport TX path */
struct iwl_cmd_meta {
	/* only for synchronous commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;		/* CMD_* flags */
	u32 tbs;		/* transfer buffers used */
};
0830
0831
0832
0833
0834
0835
0836
0837
0838
0839
/*
 * The first TB (transfer buffer) of every TFD holds the beginning of
 * the command/frame; it is IWL_FIRST_TB_SIZE bytes, stored in a
 * 64-byte-aligned per-entry scratch buffer.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

/* one entry per TFD in a TX queue */
struct iwl_pcie_txq_entry {
	void *cmd;		/* command buffer for this entry */
	struct sk_buff *skb;	/* frame, for data queues */
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

/* backing storage for the first TB of one TFD */
struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
0854
0855
0856
0857
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
0893
0894
0895
0896
/**
 * struct iwl_txq - one TX queue
 * @tfds: TFD circular buffer
 * @first_tb_bufs: first-TB scratch buffers, one per TFD
 * @first_tb_dma: DMA address of @first_tb_bufs
 * @entries: per-TFD entries (skb/cmd/meta)
 * @lock: protects the queue
 * @frozen_expiry_remainder: remaining watchdog time when frozen
 * @stuck_timer: watchdog timer for a stuck queue
 * @trans: back-pointer to the transport
 * @need_update: write pointer needs to be sent to the device
 * @frozen: the queue's timer is frozen (see freeze_txq_timer op)
 * @ampdu: queue is used for A-MPDU
 * @block: queue is blocked (counter; see block_txq_ptrs op)
 * @wd_timeout: watchdog timeout for this queue
 * @overflow_q: overflow queue for skbs that didn't fit
 * @bc_tbl: byte-count table for this queue
 * @write_ptr: next (first empty) TFD index
 * @read_ptr: last used (first to be reclaimed) TFD index
 * @dma_addr: physical address of the TFD circular buffer
 * @n_window: usable slots in the queue window
 * @id: queue id
 * @low_mark: low watermark — resume the queue below this
 * @high_mark: high watermark — stop the queue above this
 * @overflow_tx: overflow TX in progress
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
0925
0926
0927
0928
0929
0930
0931
0932
0933
0934
0935
0936
/**
 * struct iwl_trans_txqs - TX-queue state shared by the transports
 * @queue_used: bitmap of in-use queues
 * @queue_stopped: bitmap of stopped queues
 * @txq: queue pointers indexed by queue id
 * @bc_pool: DMA pool for byte-count tables
 * @bc_tbl_size: size of one byte-count table
 * @bc_table_dword: byte-count table uses dword entries (from config)
 * @page_offs: offset in skb->cb for the TSO page pointer
 * @dev_cmd_offs: offset in skb->cb for the device command pointer
 * @tso_hdr_page: per-CPU page for building TSO headers
 * @cmd: command queue parameters (fifo, queue id, watchdog timeout)
 * @tfd: TFD format parameters (max TBs, TFD size, address size)
 * @scd_bc_tbls: scheduler byte-count tables (legacy devices)
 * @queue_alloc_cmd_ver: queue-allocation command version (from config)
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;

	u8 queue_alloc_cmd_ver;
};
0964
0965
0966
0967
0968
0969
0970
0971
0972
0973
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987
0988
0989
0990
0991
0992
0993
0994
0995
0996
0997
0998
0999
1000
1001
1002
/**
 * struct iwl_trans - transport layer instance
 * @csme_own: CSME (ME firmware) currently owns the device
 * @ops: transport-specific operations
 * @op_mode: the upper layer (op mode) bound to this transport
 * @trans_cfg: transport-specific configuration data
 * @cfg: device configuration
 * @drv: driver instance
 * @state: firmware state (see enum iwl_trans_state)
 * @status: STATUS_* bitmap (see enum iwl_trans_status)
 * @dev: struct device for DMA/printing
 * @max_skb_frags: maximum skb fragments the transport supports
 * @hw_rev: hardware revision
 * @hw_rev_step: hardware revision step
 * @hw_rf_id: RF id
 * @hw_id: hardware id (PCI IDs or similar)
 * @hw_id_str: printable hardware id
 * @sku_id: SKU identifier words
 * @rx_mpdu_cmd: command id used for RX MPDUs (set by the op mode
 *	before iwl_trans_start_fw(); see the WARN there)
 * @rx_mpdu_cmd_hdr_size: size of the RX MPDU command header
 * @pm_support: power management is supported
 * @ltr_enabled: LTR is enabled
 * @pnvm_loaded: PNVM data was loaded (see iwl_trans_set_pnvm())
 * @reduce_power_loaded: reduce-power table was loaded
 * @command_groups: command-name tables for debug printing
 * @command_groups_size: number of entries in @command_groups
 * @wide_cmd_header: use the wide command header format
 * @wait_command_queue: waitqueue for synchronous commands
 * @num_rx_queues: number of RX queues in use
 * @iml_len: image loader image length
 * @iml: image loader image
 * @dev_cmd_pool: allocation pool for device TX commands
 * @dev_cmd_pool_name: name of that pool
 * @dbgfs_dir: debugfs directory
 * @dbg: debug data (see struct iwl_trans_debug)
 * @init_dram: self-init DRAM (see struct iwl_self_init_dram)
 * @system_pm_mode: platform PM behavior on suspend
 * @name: device name for printing
 * @txqs: shared TX-queue state
 * @trans_specific: transport-private area, pointer-aligned
 */
struct iwl_trans {
	bool csme_own;
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 reduce_power_loaded:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;

	/* pointer to trans-specific struct, must be last; the
	 * alignment lets the specific struct hold pointers safely */
	char trans_specific[] __aligned(sizeof(void *));
};
1061
1062 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
1063 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
1064
/* bind the op mode and apply its configuration to the transport;
 * also verifies the command-name tables are sorted */
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

/* bring up the hardware; may sleep */
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

/* detach the op mode; the transport drops to the no-FW state.
 * op_mode_leave is optional; may sleep */
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

/* the FW sent its alive notification; move to the alive state
 * *before* calling the op so the op may use alive-only paths */
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}
1101
/*
 * Load and start the firmware image. The op mode must have set
 * rx_mpdu_cmd first. Clears any stale FW-error status, and moves to
 * the FW_STARTED state only on success. May sleep.
 */
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

/* stop the device; the transport drops to the no-FW state. May sleep */
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
1128
1129 static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
1130 bool reset)
1131 {
1132 might_sleep();
1133 if (!trans->ops->d3_suspend)
1134 return 0;
1135
1136 return trans->ops->d3_suspend(trans, test, reset);
1137 }
1138
1139 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
1140 enum iwl_d3_status *status,
1141 bool test, bool reset)
1142 {
1143 might_sleep();
1144 if (!trans->ops->d3_resume)
1145 return 0;
1146
1147 return trans->ops->d3_resume(trans, status, test, reset);
1148 }
1149
1150 static inline struct iwl_trans_dump_data *
1151 iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
1152 const struct iwl_dump_sanitize_ops *sanitize_ops,
1153 void *sanitize_ctx)
1154 {
1155 if (!trans->ops->dump_data)
1156 return NULL;
1157 return trans->ops->dump_data(trans, dump_mask,
1158 sanitize_ops, sanitize_ctx);
1159 }
1160
/* allocate a zeroed device TX command from the pool (atomic context
 * safe); may return NULL on allocation failure */
static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

/* return a device TX command to the pool */
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
1174
/*
 * Queue a frame for transmission. Rejected with -EIO when the FW hit
 * an error or is not alive. Callable from atomic context.
 */
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

/* reclaim (free) frames up to @ssn on @queue, collecting them in
 * @skbs; only valid while the FW is alive */
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

/* set the queue pointers (write index) without reclaiming */
static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}
1210
/* disable a TX queue; @configure_scd selects whether the scheduler
 * is reconfigured as well */
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

/*
 * Enable a TX queue with an explicit scheduler configuration.
 * Returns the op's result (%true on "queue was already used" per the
 * transport — confirm against the PCIe implementation), %false on bad
 * state. May sleep; only valid while the FW is alive.
 */
static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}
1232
/* fetch the DMA data of one RX queue; -ENOTSUPP when the transport
 * doesn't implement the optional op */
static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

/* free a dynamically allocated (TVQM) TX queue */
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

/*
 * Dynamically allocate a (TVQM) TX queue; returns the queue id or a
 * negative error. Optional op; only valid while the FW is alive.
 * May sleep.
 */
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    u32 flags, u32 sta_mask, u8 tid,
		    int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
				     size, wdg_timeout);
}

/* mark a TX queue shared/unshared; optional op, no-op otherwise */
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
1277
/* convenience wrapper: enable a TX queue from scalar parameters;
 * aggregation is implied by a valid (non-negative) station id */
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

/* convenience wrapper: enable a non-aggregation (AC) queue with no
 * station, default frame limit, and ssn 0 */
static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
1308
/* freeze/unfreeze the watchdog timers of the queues in @txqs;
 * optional op, only valid while the FW is alive */
static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

/* block/unblock TX queue pointer updates; optional op, only valid
 * while the FW is alive */
static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}
1333
/* wait until the queues in bitmap @txqs are empty; note this one
 * deliberately does not WARN on a bad FW state, only errors out */
static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	/* No need to wait if the firmware is not alive */
	if (trans->state != IWL_TRANS_FW_ALIVE) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

/* wait until a single queue is empty; optional op, only valid while
 * the FW is alive */
static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}
1361
/* direct register access wrappers */

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

/* read a periphery register */
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}
1381
1382 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1383 u32 val)
1384 {
1385 return trans->ops->write_prph(trans, ofs, val);
1386 }
1387
/* read @dwords 32-bit words of device memory at @addr into @buf */
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

/* byte-count convenience wrapper; the size must be a whole number of
 * dwords (enforced at build time for constant sizes) */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)	      \
	do {							      \
		if (__builtin_constant_p(bufsize))		      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));	      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
1400
/* start an IMR DMA transfer; no-op success when the transport has no
 * IMR support (optional op) */
static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
					  u32 dst_addr, u64 src_addr,
					  u32 byte_cnt)
{
	if (trans->ops->imr_dma_data)
		return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
	return 0;
}

/* read a single 32-bit word of device memory; returns the poison
 * value 0xa5a5a5a5 (and warns) on read failure */
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}
1419
/* write @dwords 32-bit words from @buf to device memory at @addr */
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

/* write a single 32-bit word; note the int result of write_mem is
 * returned through a u32, matching historical callers */
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
1431
/* set/clear the PMI state; optional op */
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

/* software-reset the device; no-op success when unsupported */
static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
				     bool retake_ownership)
{
	if (trans->ops->sw_reset)
		return trans->ops->sw_reset(trans, retake_ownership);
	return 0;
}

/* read-modify-write @reg: clear @mask bits, then set @value bits */
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

/* acquire direct NIC access; pairs with iwl_trans_release_nic_access().
 * The __cond_lock annotation teaches sparse about the conditional
 * acquisition */
#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	trans->ops->release_nic_access(trans);
	__release(nic_access);
}
1462
/*
 * Report a firmware error to the op mode. Only the first report (per
 * STATUS_FW_ERROR being set) is forwarded; the transport then drops
 * to the no-FW state.
 */
static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode, sync);
		trans->state = IWL_TRANS_NO_FW;
	}
}

/* true iff the firmware is in the alive state */
static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

/* trigger an NMI in the device and wait for it; optional op */
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}
1485
1486 void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
1487 u32 sw_err_bit);
1488
/*
 * Hand the PNVM data to the transport. pnvm_loaded is set even for
 * transports without the op, so callers won't retry endlessly.
 */
static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
				     const void *data, u32 len)
{
	if (trans->ops->set_pnvm) {
		int ret = trans->ops->set_pnvm(trans, data, len);

		if (ret)
			return ret;
	}

	trans->pnvm_loaded = true;

	return 0;
}

/* hand the reduce-power table to the transport; same pattern as
 * iwl_trans_set_pnvm() */
static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
					     const void *data, u32 len)
{
	if (trans->ops->set_reduce_power) {
		int ret = trans->ops->set_reduce_power(trans, data, len);

		if (ret)
			return ret;
	}

	trans->reduce_power_loaded = true;
	return 0;
}
1517
/* true if any INI debug config (internal or external) was found,
 * even a corrupted one — only NOT_LOADED counts as absent */
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

/* enable/disable device interrupts; optional op */
static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	if (trans->ops->interrupts)
		trans->ops->interrupts(trans, enable);
}
1529
1530
1531
1532
1533 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1534 struct device *dev,
1535 const struct iwl_trans_ops *ops,
1536 const struct iwl_cfg_trans_params *cfg_trans);
1537 int iwl_trans_init(struct iwl_trans *trans);
1538 void iwl_trans_free(struct iwl_trans *trans);
1539
1540
1541
1542
1543 int __must_check iwl_pci_register_driver(void);
1544 void iwl_pci_unregister_driver(void);
1545
1546 #endif