// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * ME hardware layer: register access, reset, interrupts and power gating.
 */

#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"

#include "mei-trace.h"

/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
                                  unsigned long offset)
{
        return ioread32(hw->mem_addr + offset);
}

/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset at which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
                                    unsigned long offset, u32 value)
{
        iowrite32(value, hw->mem_addr + offset);
}

/**
 * mei_me_mecbrw_read - Reads 32bit data from the ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
        return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}

/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
        mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}

/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
        trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

        return reg;
}

/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
        trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

        return reg;
}

/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
        trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
        mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}

/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * masking off the interrupt status bits, which are write-one-to-clear
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
        reg &= ~H_CSR_IS_MASK;
        mei_hcsr_write(dev, reg);
}
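
/*
 * Note: the H_CSR interrupt status bits are write-one-to-clear, so any
 * read-modify-write that is not meant to acknowledge an interrupt must go
 * through mei_hcsr_set(), which masks H_CSR_IS_MASK out first; only the
 * interrupt paths write H_CSR directly via mei_hcsr_write().
 */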

/**
 * mei_hcsr_set_hig - set host interrupt (set H_IG)
 *
 * @dev: the device structure
 */
static inline void mei_hcsr_set_hig(struct mei_device *dev)
{
        u32 hcsr;

        hcsr = mei_hcsr_read(dev) | H_IG;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
        trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

        return reg;
}

/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
        trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
        mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}

/**
 * mei_me_trc_status - read trc status register
 *
 * @dev: mei device
 * @trc: trc status register value
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (!hw->cfg->hw_trc_supported)
                return -EOPNOTSUPP;

        *trc = mei_me_reg_read(hw, ME_TRC);
        trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);

        return 0;
}

/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
                            struct mei_fw_status *fw_status)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
        int ret;
        int i;

        if (!fw_status || !hw->read_fws)
                return -EINVAL;

        fw_status->count = fw_src->count;
        for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
                ret = hw->read_fws(dev, fw_src->status[i],
                                   &fw_status->status[i]);
                trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
                                       fw_src->status[i],
                                       fw_status->status[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 *
 * Return: -EINVAL when read_fws is not set, 0 on success
 */
static int mei_me_hw_config(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 hcsr, reg;

        if (WARN_ON(!hw->read_fws))
                return -EINVAL;

        /* doesn't change in runtime */
        hcsr = mei_hcsr_read(dev);
        hw->hbuf_depth = (hcsr & H_CBD) >> 24;

        reg = 0;
        hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        hw->d0i3_supported =
                ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

        hw->pg_state = MEI_PG_OFF;
        if (hw->d0i3_supported) {
                reg = mei_me_d0i3c_read(dev);
                if (reg & H_D0I3C_I3)
                        hw->pg_state = MEI_PG_ON;
        }

        return 0;
}

/**
 * mei_me_pg_state - translate internal pg state
 *   to the mei power gating state
 *
 * @dev: mei device
 *
 * Return: the device power gating state
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        return hw->pg_state;
}

/**
 * me_intr_src - extract the interrupt source from the host csr
 *
 * @hcsr: host csr register value
 *
 * Return: interrupt source bits
 */
static inline u32 me_intr_src(u32 hcsr)
{
        return hcsr & H_CSR_IS_MASK;
}

/**
 * me_intr_disable - disables mei device interrupts
 *      using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
        hcsr &= ~H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
}

/**
 * me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
        if (me_intr_src(hcsr))
                mei_hcsr_write(dev, hcsr);
}

/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        me_intr_clear(dev, hcsr);
}

/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        me_intr_disable(dev, hcsr);
}

/**
 * mei_me_synchronize_irq - wait for pending IRQ handlers
 *
 * @dev: the device structure
 */
static void mei_me_synchronize_irq(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        synchronize_irq(hw->irq);
}

/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_IG;
        hcsr &= ~H_RST;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        return (hcsr & H_RDY) == H_RDY;
}

/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
        u32 mecsr = mei_me_mecsr_read(dev);

        return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}

/**
 * mei_me_hw_is_resetting - check whether the me(hw) is in reset
 *
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_me_hw_is_resetting(struct mei_device *dev)
{
        u32 mecsr = mei_me_mecsr_read(dev);

        return (mecsr & ME_RST_HRA) == ME_RST_HRA;
}

/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_hw_ready,
                           dev->recvd_hw_ready,
                           mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
        mutex_lock(&dev->device_lock);
        if (!dev->recvd_hw_ready) {
                dev_err(dev->dev, "wait hw ready failed\n");
                return -ETIME;
        }

        mei_me_hw_reset_release(dev);
        dev->recvd_hw_ready = false;
        return 0;
}

/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
        int ret = mei_me_hw_ready_wait(dev);

        if (ret)
                return ret;
        dev_dbg(dev->dev, "hw is ready\n");

        mei_me_host_set_ready(dev);
        return ret;
}

/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
        u32 hcsr;
        char read_ptr, write_ptr;

        hcsr = mei_hcsr_read(dev);

        read_ptr = (char) ((hcsr & H_CBRP) >> 8);
        write_ptr = (char) ((hcsr & H_CBWP) >> 16);

        return (unsigned char) (write_ptr - read_ptr);
}
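
/*
 * Note: the circular-buffer read/write pointers above are free-running
 * 8-bit counters, so the unsigned char subtraction yields the filled-slot
 * count modulo 256 and stays correct across pointer wrap-around.
 */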

/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * Return: true if empty, false - otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
        return mei_hbuf_filled_slots(dev) == 0;
}

/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned char filled_slots, empty_slots;

        filled_slots = mei_hbuf_filled_slots(dev);
        empty_slots = hw->hbuf_depth - filled_slots;

        /* check for overflow */
        if (filled_slots > hw->hbuf_depth)
                return -EOVERFLOW;

        return empty_slots;
}

/**
 * mei_me_hbuf_depth - returns depth of the hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in slots
 */
static u32 mei_me_hbuf_depth(const struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        return hw->hbuf_depth;
}

/**
 * mei_me_hbuf_write - writes a message to host hw buffer.
 *
 * @dev: the device structure
 * @hdr: header of message
 * @hdr_len: header length in bytes: must be a multiple of a slot (4 bytes)
 * @data: payload
 * @data_len: payload length in bytes
 *
 * Return: 0 if success, < 0 - otherwise.
 */
static int mei_me_hbuf_write(struct mei_device *dev,
                             const void *hdr, size_t hdr_len,
                             const void *data, size_t data_len)
{
        unsigned long rem;
        unsigned long i;
        const u32 *reg_buf;
        u32 dw_cnt;
        int empty_slots;

        if (WARN_ON(!hdr || !data || hdr_len & 0x3))
                return -EINVAL;

        dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));

        empty_slots = mei_hbuf_empty_slots(dev);
        dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);

        if (empty_slots < 0)
                return -EOVERFLOW;

        dw_cnt = mei_data2slots(hdr_len + data_len);
        if (dw_cnt > (u32)empty_slots)
                return -EMSGSIZE;

        reg_buf = hdr;
        for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
                mei_me_hcbww_write(dev, reg_buf[i]);

        reg_buf = data;
        for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
                mei_me_hcbww_write(dev, reg_buf[i]);

        rem = data_len & 0x3;
        if (rem > 0) {
                u32 reg = 0;

                memcpy(&reg, (const u8 *)data + data_len - rem, rem);
                mei_me_hcbww_write(dev, reg);
        }

        mei_hcsr_set_hig(dev);
        if (!mei_me_hw_is_ready(dev))
                return -EIO;

        return 0;
}
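
/*
 * Note: a trailing payload fragment shorter than a slot is zero-padded to a
 * full 32-bit slot before being pushed to the circular buffer; setting H_IG
 * afterwards acts as the doorbell that tells the ME firmware to consume
 * the message.
 */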

/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
        u32 me_csr;
        char read_ptr, write_ptr;
        unsigned char buffer_depth, filled_slots;

        me_csr = mei_me_mecsr_read(dev);
        buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
        read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
        write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
        filled_slots = (unsigned char) (write_ptr - read_ptr);

        /* check for overflow */
        if (filled_slots > buffer_depth)
                return -EOVERFLOW;

        dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
        return (int)filled_slots;
}

/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
                             unsigned long buffer_length)
{
        u32 *reg_buf = (u32 *)buffer;

        for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
                *reg_buf++ = mei_me_mecbrw_read(dev);

        if (buffer_length > 0) {
                u32 reg = mei_me_mecbrw_read(dev);

                memcpy(reg_buf, &reg, buffer_length);
        }

        mei_hcsr_set_hig(dev);
        return 0;
}
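
/*
 * Note: on the read side the mirror image of the write padding applies -
 * the last slot is read out in full but only the remaining buffer_length
 * bytes are copied to the caller; setting H_IG then signals the ME firmware
 * that the host has drained the slots.
 */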

/**
 * mei_me_pg_set - write pg enter register
 *
 * @dev: the device structure
 */
static void mei_me_pg_set(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_reg_read(hw, H_HPG_CSR);
        trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

        reg |= H_HPG_CSR_PGI;

        trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
        mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_reg_read(hw, H_HPG_CSR);
        trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

        WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

        reg |= H_HPG_CSR_PGIHEXR;

        trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
        mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        dev->pg_event = MEI_PG_EVENT_WAIT;

        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
        if (ret)
                return ret;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
                mei_me_pg_set(dev);
                ret = 0;
        } else {
                ret = -ETIME;
        }

        dev->pg_event = MEI_PG_EVENT_IDLE;
        hw->pg_state = MEI_PG_ON;

        return ret;
}

/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
                goto reply;

        dev->pg_event = MEI_PG_EVENT_WAIT;

        mei_me_pg_unset(dev);

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

reply:
        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                ret = -ETIME;
                goto out;
        }

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
        if (ret)
                return ret;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
                ret = 0;
        else
                ret = -ETIME;

out:
        dev->pg_event = MEI_PG_EVENT_IDLE;
        hw->pg_state = MEI_PG_OFF;

        return ret;
}

/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
        return dev->pg_event >= MEI_PG_EVENT_WAIT &&
               dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}

/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true if pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg = mei_me_mecsr_read(dev);

        if (hw->d0i3_supported)
                return true;

        if ((reg & ME_PGIC_HRA) == 0)
                goto notsupported;

        if (!dev->hbm_f_pg_supported)
                goto notsupported;

        return true;

notsupported:
        dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
                hw->d0i3_supported,
                !!(reg & ME_PGIC_HRA),
                dev->version.major_version,
                dev->version.minor_version,
                HBM_MAJOR_VERSION_PGI,
                HBM_MINOR_VERSION_PGI);

        return false;
}
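
/*
 * Note: on hardware with D0i3 support power gating is always available;
 * legacy power gating additionally requires both the ME_PGIC_HRA capability
 * bit in the ME CSR and PGI support in the HBM protocol version negotiated
 * during the HBM handshake.
 */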

/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
        u32 reg = mei_me_d0i3c_read(dev);

        reg |= H_D0I3C_I3;
        if (intr)
                reg |= H_D0I3C_IR;
        else
                reg &= ~H_D0I3C_IR;
        mei_me_d0i3c_write(dev, reg);

        /* read it to ensure HW consistency */
        reg = mei_me_d0i3c_read(dev);
        return reg;
}

/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
        u32 reg = mei_me_d0i3c_read(dev);

        reg &= ~H_D0I3C_I3;
        reg |= H_D0I3C_IR;
        mei_me_d0i3c_write(dev, reg);

        /* read it to ensure HW consistency */
        reg = mei_me_d0i3c_read(dev);
        return reg;
}

/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure,
 *   with confirmation over both hbm and hw
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
        u32 reg;

        reg = mei_me_d0i3c_read(dev);
        if (reg & H_D0I3C_I3) {
                /* we are in d0i3, nothing to do */
                dev_dbg(dev->dev, "d0i3 set not needed\n");
                ret = 0;
                goto on;
        }

        /* PGI entry procedure */
        dev->pg_event = MEI_PG_EVENT_WAIT;

        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
        if (ret)
                /* FIXME: should we reset here? */
                goto out;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                ret = -ETIME;
                goto out;
        }
        /* end PGI entry procedure */

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

        reg = mei_me_d0i3_set(dev, true);
        if (!(reg & H_D0I3C_CIP)) {
                dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
                ret = 0;
                goto on;
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
                reg = mei_me_d0i3c_read(dev);
                if (!(reg & H_D0I3C_I3)) {
                        ret = -ETIME;
                        goto out;
                }
        }

        ret = 0;
on:
        hw->pg_state = MEI_PG_ON;
out:
        dev->pg_event = MEI_PG_EVENT_IDLE;
        dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
        return ret;
}
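
/*
 * Note: d0i3 entry is a two-phase handshake - first the PG isolation entry
 * request is confirmed over HBM, then the H_D0I3C_I3 bit is set with
 * interrupt confirmation requested; the H_D0I3C_CIP (command-in-progress)
 * bit tells us whether the second wait is needed at all.
 */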

/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_d0i3c_read(dev);
        if (reg & H_D0I3C_I3) {
                /* we are in d0i3, nothing to do */
                dev_dbg(dev->dev, "already d0i3 : set not needed\n");
                goto on;
        }

        mei_me_d0i3_set(dev, false);
on:
        hw->pg_state = MEI_PG_ON;
        dev->pg_event = MEI_PG_EVENT_IDLE;
        dev_dbg(dev->dev, "d0i3 enter\n");
        return 0;
}

/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        int ret;
        u32 reg;

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

        reg = mei_me_d0i3c_read(dev);
        if (!(reg & H_D0I3C_I3)) {
                /* we are not in d0i3, nothing to do */
                dev_dbg(dev->dev, "d0i3 exit not needed\n");
                ret = 0;
                goto off;
        }

        reg = mei_me_d0i3_unset(dev);
        if (!(reg & H_D0I3C_CIP)) {
                dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
                ret = 0;
                goto off;
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                           dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
                reg = mei_me_d0i3c_read(dev);
                if (reg & H_D0I3C_I3) {
                        ret = -ETIME;
                        goto out;
                }
        }

        ret = 0;
off:
        hw->pg_state = MEI_PG_OFF;
out:
        dev->pg_event = MEI_PG_EVENT_IDLE;

        dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
        return ret;
}

/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *                         in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
                return;

        dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
        hw->pg_state = MEI_PG_OFF;
        if (waitqueue_active(&dev->wait_pg))
                wake_up(&dev->wait_pg);
}

/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
            (intr_source & H_D0I3C_IS)) {
                dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
                if (hw->pg_state == MEI_PG_ON) {
                        hw->pg_state = MEI_PG_OFF;
                        if (dev->hbm_state != MEI_HBM_IDLE) {
                                /*
                                 * force H_RDY because it could be
                                 * wiped off during PG
                                 */
                                dev_dbg(dev->dev, "d0i3 set host ready\n");
                                mei_me_host_set_ready(dev);
                        }
                } else {
                        hw->pg_state = MEI_PG_ON;
                }

                wake_up(&dev->wait_pg);
        }

        if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
                /*
                 * HW sent some data and we are in D0i3, so
                 * we got here because of HW initiated exit from D0i3.
                 * Start runtime pm resume sequence to exit low power state.
                 */
                dev_dbg(dev->dev, "d0i3 want resume\n");
                mei_hbm_pg_resume(dev);
        }
}

/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                mei_me_d0i3_intr(dev, intr_source);
        else
                mei_me_pg_legacy_intr(dev);
}

/**
 * mei_me_pg_enter_sync - perform runtime pm entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
int mei_me_pg_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                return mei_me_d0i3_enter_sync(dev);
        else
                return mei_me_pg_legacy_enter_sync(dev);
}

/**
 * mei_me_pg_exit_sync - perform runtime pm exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
int mei_me_pg_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (hw->d0i3_supported)
                return mei_me_d0i3_exit_sync(dev);
        else
                return mei_me_pg_legacy_exit_sync(dev);
}

/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        int ret;
        u32 hcsr;

        if (intr_enable) {
                mei_me_intr_enable(dev);
                if (hw->d0i3_supported) {
                        ret = mei_me_d0i3_exit_sync(dev);
                        if (ret)
                                return ret;
                } else {
                        hw->pg_state = MEI_PG_OFF;
                }
        }

        pm_runtime_set_active(dev->dev);

        hcsr = mei_hcsr_read(dev);
        /* H_RST may be found lit before reset is started,
         * for example if preceding reset flow hasn't completed.
         * In that case asserting H_RST will be ignored, therefore
         * we need to clean H_RST bit to start a successful reset sequence.
         */
        if ((hcsr & H_RST) == H_RST) {
                dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
                hcsr &= ~H_RST;
                mei_hcsr_set(dev, hcsr);
                hcsr = mei_hcsr_read(dev);
        }

        hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

        if (!intr_enable)
                hcsr &= ~H_CSR_IE_MASK;

        dev->recvd_hw_ready = false;
        mei_hcsr_write(dev, hcsr);

        /*
         * Host reads the H_CSR once to ensure that the
         * posted write to H_CSR completes.
         */
        hcsr = mei_hcsr_read(dev);

        if ((hcsr & H_RST) == 0)
                dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

        if ((hcsr & H_RDY) == H_RDY)
                dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

        if (!intr_enable) {
                mei_me_hw_reset_release(dev);
                if (hw->d0i3_supported) {
                        ret = mei_me_d0i3_enter(dev);
                        if (ret)
                                return ret;
                }
        }
        return 0;
}
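
/*
 * Note: the reset sequence above first clears a stale H_RST if one is
 * found lit, then asserts H_RST together with H_IG and the
 * write-one-to-clear interrupt status bits, and finally reads H_CSR back
 * so the posted MMIO write is guaranteed to have reached the device.
 */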

/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
        struct mei_device *dev = (struct mei_device *)dev_id;
        u32 hcsr;

        hcsr = mei_hcsr_read(dev);
        if (!me_intr_src(hcsr))
                return IRQ_NONE;

        dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));

        /* disable interrupt generation */
        me_intr_disable(dev, hcsr);
        return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL_GPL(mei_me_irq_quick_handler);
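
/*
 * Note: the quick handler only claims the interrupt and masks further
 * interrupt generation; the actual processing, acknowledgment and
 * re-enabling of interrupts happen in the threaded handler below.
 */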

/**
 * mei_me_irq_thread_handler - function called after ISR to handle the
 * interrupt processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
        struct mei_device *dev = (struct mei_device *) dev_id;
        struct list_head cmpl_list;
        s32 slots;
        u32 hcsr;
        int rets = 0;

        dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");

        mutex_lock(&dev->device_lock);

        hcsr = mei_hcsr_read(dev);
        me_intr_clear(dev, hcsr);

        INIT_LIST_HEAD(&cmpl_list);

        /* check if ME wants a reset */
        if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
                dev_warn(dev->dev, "FW not ready: resetting.\n");
                if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
                    dev->dev_state == MEI_DEV_POWER_DOWN)
                        mei_cl_all_disconnect(dev);
                else if (dev->dev_state != MEI_DEV_DISABLED)
                        schedule_work(&dev->reset_work);
                goto end;
        }

        if (mei_me_hw_is_resetting(dev))
                mei_hcsr_set_hig(dev);

        mei_me_pg_intr(dev, me_intr_src(hcsr));

        /* check if we need to start the dev */
        if (!mei_host_is_ready(dev)) {
                if (mei_hw_is_ready(dev)) {
                        dev_dbg(dev->dev, "we need to start the dev.\n");
                        dev->recvd_hw_ready = true;
                        wake_up(&dev->wait_hw_ready);
                } else {
                        dev_dbg(dev->dev, "Spurious Interrupt\n");
                }
                goto end;
        }

        /* check slots available for reading */
        slots = mei_count_full_read_slots(dev);
        while (slots > 0) {
                dev_dbg(dev->dev, "slots to read = %08x\n", slots);
                rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
                /* There is a race between ME write and interrupt delivery:
                 * Not all data is always available immediately after the
                 * interrupt, so try to read again on the next interrupt.
                 */
                if (rets == -ENODATA)
                        break;

                if (rets) {
                        dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
                                rets, dev->dev_state);
                        if (dev->dev_state != MEI_DEV_RESETTING &&
                            dev->dev_state != MEI_DEV_DISABLED &&
                            dev->dev_state != MEI_DEV_POWERING_DOWN &&
                            dev->dev_state != MEI_DEV_POWER_DOWN)
                                schedule_work(&dev->reset_work);
                        goto end;
                }
        }

        dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

        /*
         * During PG handshake only allowed write is the replay to the
         * PG exit message, so block calling write function
         * if the pg event is in PG handshake
         */
        if (dev->pg_event != MEI_PG_EVENT_WAIT &&
            dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                rets = mei_irq_write_handler(dev, &cmpl_list);
                dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
        }

        mei_irq_compl_handler(dev, &cmpl_list);

end:
        dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
        mei_me_intr_enable(dev);
        mutex_unlock(&dev->device_lock);
        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);

static const struct mei_hw_ops mei_me_hw_ops = {

        .trc_status = mei_me_trc_status,
        .fw_status = mei_me_fw_status,
        .pg_state = mei_me_pg_state,

        .host_is_ready = mei_me_host_is_ready,

        .hw_is_ready = mei_me_hw_is_ready,
        .hw_reset = mei_me_hw_reset,
        .hw_config = mei_me_hw_config,
        .hw_start = mei_me_hw_start,

        .pg_in_transition = mei_me_pg_in_transition,
        .pg_is_enabled = mei_me_pg_is_enabled,

        .intr_clear = mei_me_intr_clear,
        .intr_enable = mei_me_intr_enable,
        .intr_disable = mei_me_intr_disable,
        .synchronize_irq = mei_me_synchronize_irq,

        .hbuf_free_slots = mei_me_hbuf_empty_slots,
        .hbuf_is_ready = mei_me_hbuf_is_empty,
        .hbuf_depth = mei_me_hbuf_depth,

        .write = mei_me_hbuf_write,

        .rdbuf_full_slots = mei_me_count_full_read_slots,
        .read_hdr = mei_me_mecbrw_read,
        .read = mei_me_read_slots
};

/**
 * mei_me_fw_type_nm() - check for nm sku
 *
 * Read ME FW Status register to check for the Node Manager (NM) Firmware.
 * The NM FW is only signaled in PCI function 0.
 * __Note__: Deprecated by PCH8 and newer.
 *
 * @pdev: pci device
 *
 * Return: true in case of NM firmware
 */
static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
{
        u32 reg;
        unsigned int devfn;

        devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
        pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
        /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
        return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM \
        .quirk_probe = mei_me_fw_type_nm

/**
 * mei_me_fw_type_sps_4() - check for sps 4.0 sku
 *
 * Read ME FW Status register to check for SPS Firmware.
 * The SPS FW is only signaled in PCI function 0.
 * __Note__: Deprecated by SPS 5.0 and newer.
 *
 * @pdev: pci device
 *
 * Return: true in case of SPS firmware
 */
static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
{
        u32 reg;
        unsigned int devfn;

        devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
        pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
}

#define MEI_CFG_FW_SPS_4 \
        .quirk_probe = mei_me_fw_type_sps_4

/**
 * mei_me_fw_type_sps_ign() - check for sps or ign sku
 *
 * Read ME FW Status register to check for SPS or IGN Firmware.
 * The SPS/IGN FW is only signaled in PCI function 0.
 *
 * @pdev: pci device
 *
 * Return: true in case of SPS/IGN firmware
 */
static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
{
        u32 reg;
        u32 fw_type;
        unsigned int devfn;

        devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
        pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
        fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);

        dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);

        return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN ||
               fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
}

#define MEI_CFG_KIND_ITOUCH \
        .kind = "itouch"

#define MEI_CFG_TYPE_GSC \
        .kind = "gsc"

#define MEI_CFG_TYPE_GSCFI \
        .kind = "gscfi"

#define MEI_CFG_FW_SPS_IGN \
        .quirk_probe = mei_me_fw_type_sps_ign

#define MEI_CFG_FW_VER_SUPP \
        .fw_ver_supported = 1

#define MEI_CFG_ICH_HFS \
        .fw_status.count = 0

#define MEI_CFG_ICH10_HFS \
        .fw_status.count = 1, \
        .fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS \
        .fw_status.count = 2, \
        .fw_status.status[0] = PCI_CFG_HFS_1, \
        .fw_status.status[1] = PCI_CFG_HFS_2

#define MEI_CFG_PCH8_HFS \
        .fw_status.count = 6, \
        .fw_status.status[0] = PCI_CFG_HFS_1, \
        .fw_status.status[1] = PCI_CFG_HFS_2, \
        .fw_status.status[2] = PCI_CFG_HFS_3, \
        .fw_status.status[3] = PCI_CFG_HFS_4, \
        .fw_status.status[4] = PCI_CFG_HFS_5, \
        .fw_status.status[5] = PCI_CFG_HFS_6

#define MEI_CFG_DMA_128 \
        .dma_size[DMA_DSCR_HOST] = SZ_128K, \
        .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
        .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE

#define MEI_CFG_TRC \
        .hw_trc_supported = 1
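
/*
 * Note: the MEI_CFG_* macros above expand to designated-initializer
 * fragments and are meant to be combined, one per line, inside the
 * struct mei_cfg definitions below.
 */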

/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
        MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
        MEI_CFG_ICH10_HFS,
};

/* PCH6 devices */
static const struct mei_cfg mei_me_pch6_cfg = {
        MEI_CFG_PCH_HFS,
};

/* PCH7 devices */
static const struct mei_cfg mei_me_pch7_cfg = {
        MEI_CFG_PCH_HFS,
        MEI_CFG_FW_VER_SUPP,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
        MEI_CFG_PCH_HFS,
        MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point and newer devices - iTouch */
static const struct mei_cfg mei_me_pch8_itouch_cfg = {
        MEI_CFG_KIND_ITOUCH,
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
};

/* PCH8 Lynx Point with quirk for SPS firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_SPS_4,
};

/* LBG with quirk for SPS (4.0) firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_SPS_4,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
        MEI_CFG_DMA_128,
};

/* Cannon Lake with quirk for SPS 5.0 and newer firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
        MEI_CFG_DMA_128,
        MEI_CFG_FW_SPS_IGN,
};

/* Cannon Lake iTouch with quirk for SPS 5.0 and newer firmware exclusion,
 * w/o DMA support
 */
static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
        MEI_CFG_KIND_ITOUCH,
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_SPS_IGN,
};

/* Tiger Lake and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
        MEI_CFG_DMA_128,
        MEI_CFG_TRC,
};

/* Tiger Lake with quirk for SPS 5.0 and newer firmware exclusion */
static const struct mei_cfg mei_me_pch15_sps_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
        MEI_CFG_DMA_128,
        MEI_CFG_TRC,
        MEI_CFG_FW_SPS_IGN,
};

/* Graphics System Controller */
static const struct mei_cfg mei_me_gsc_cfg = {
        MEI_CFG_TYPE_GSC,
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
};

/* Graphics System Controller Firmware Interface */
static const struct mei_cfg mei_me_gscfi_cfg = {
        MEI_CFG_TYPE_GSCFI,
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
};

/*
 * mei_cfg_list - a list of platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
static const struct mei_cfg *const mei_cfg_list[] = {
        [MEI_ME_UNDEF_CFG] = NULL,
        [MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
        [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
        [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
        [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
        [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
        [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
        [MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
        [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
        [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
        [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
        [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
        [MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
        [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
        [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
        [MEI_ME_GSC_CFG] = &mei_me_gsc_cfg,
        [MEI_ME_GSCFI_CFG] = &mei_me_gscfi_cfg,
};

const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
{
        BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);

        if (idx >= MEI_ME_NUM_CFG)
                return NULL;

        return mei_cfg_list[idx];
}
EXPORT_SYMBOL_GPL(mei_me_get_cfg);

/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @parent: device associated with physical device (pci/platform)
 * @cfg: per device generation config
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct device *parent,
                                   const struct mei_cfg *cfg)
{
        struct mei_device *dev;
        struct mei_me_hw *hw;
        int i;

        dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
        if (!dev)
                return NULL;

        hw = to_me_hw(dev);

        for (i = 0; i < DMA_DSCR_NUM; i++)
                dev->dr_dscr[i].size = cfg->dma_size[i];

        mei_device_init(dev, parent, &mei_me_hw_ops);
        hw->cfg = cfg;

        dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;

        dev->kind = cfg->kind;

        return dev;
}
EXPORT_SYMBOL_GPL(mei_me_dev_init);