// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF	0xa255

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM			16
#define SEC_ECC_MASK			0xFF
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x7c1ff
#define SEC_CORE_INT_CLEAR		0x7c1ff
#define SEC_SAA_ENABLE			0x17f

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x7c177
#define SEC_OOO_SHUTDOWN_SEL		0x301014
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

#define SEC_CONTROL_REG			0x301200
#define SEC_DYNAMIC_GATE_REG		0x30121c
#define SEC_CORE_AUTO_GATE		0x30212c
#define SEC_DYNAMIC_GATE_EN		0x7bff
#define SEC_CORE_AUTO_GATE_EN		GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))

#define SEC_TRNG_EN_SHIFT		8
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					 SEC_USER1_ENABLE_DATA_SSV | \
					 SEC_USER1_WB_CONTEXT_SSV | \
					 SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_INTERFACE_USER_CTRL0_REG_V3	0x302220
#define SEC_INTERFACE_USER_CTRL1_REG_V3	0x302224
#define SEC_USER1_SMMU_NORMAL_V3	(BIT(23) | BIT(17) | BIT(11) | BIT(5))
#define SEC_USER1_SMMU_MASK_V3		0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_PREFETCH_CFG		0x301130
#define SEC_SVA_TRANS			0x301EC4
#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE		BIT(1)
#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		64
#define SEC_SQE_MASK_LEN		48
#define SEC_SHAPER_TYPE_RATE		400

#define SEC_DFX_BASE			0x301000
#define SEC_DFX_CORE			0x302100
#define SEC_DFX_COMMON1			0x301600
#define SEC_DFX_COMMON2			0x301C00
#define SEC_DFX_BASE_LEN		0x9D
#define SEC_DFX_CORE_LEN		0x32B
#define SEC_DFX_COMMON1_LEN		0x45
#define SEC_DFX_COMMON2_LEN		0xBA

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
	.register_to_crypto	= sec_register_to_crypto,
	.unregister_from_crypto	= sec_unregister_from_crypto,
};

static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
};

static struct dfx_diff_registers sec_diff_regs[] = {
	{
		.reg_offset = SEC_DFX_BASE,
		.reg_len = SEC_DFX_BASE_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON1,
		.reg_len = SEC_DFX_COMMON1_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON2,
		.reg_len = SEC_DFX_COMMON2_LEN,
	}, {
		.reg_offset = SEC_DFX_CORE,
		.reg_len = SEC_DFX_CORE_LEN,
	},
};

static int sec_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(sec_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Number of queues in each ctx (must be even, 2-32, default 2)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), default 0");
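
/*
 * Example (illustrative values only): load the PF driver with four
 * queues per crypto context, eight VFs and uacce disabled:
 *
 *   modprobe hisi_sec2 ctx_q_num=4 vfs_num=8 uacce_mode=0
 *
 * ctx_q_num is validated by sec_ctx_q_num_set() above (nonzero, even,
 * at most SEC_CTX_Q_NUM_MAX) and vfs_num by vfs_num_set().
 */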

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means SEC registers to crypto only,
 * uacce_mode = 1 means SEC registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

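/*
 * Configure the engine's data endianness to match the CPU: both
 * endian-control bits in SEC_CONTROL_REG are cleared first, then
 * BIT(1) is set on non-64-bit kernels and BIT(0) on big-endian
 * kernels, so a 64-bit little-endian host leaves both bits clear.
 */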
static void sec_set_endian(struct hisi_qm *qm)
{
	u32 reg;

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= ~(BIT(1) | BIT(0));
	if (!IS_ENABLED(CONFIG_64BIT))
		reg |= BIT(1);

	if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		reg |= BIT(0);

	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}

static void sec_engine_sva_config(struct hisi_qm *qm)
{
	u32 reg;

	if (qm->ver > QM_HW_V2) {
		reg = readl_relaxed(qm->io_base +
				    SEC_INTERFACE_USER_CTRL0_REG_V3);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
			       SEC_INTERFACE_USER_CTRL0_REG_V3);

		reg = readl_relaxed(qm->io_base +
				    SEC_INTERFACE_USER_CTRL1_REG_V3);
		reg &= SEC_USER1_SMMU_MASK_V3;
		reg |= SEC_USER1_SMMU_NORMAL_V3;
		writel_relaxed(reg, qm->io_base +
			       SEC_INTERFACE_USER_CTRL1_REG_V3);
	} else {
		reg = readl_relaxed(qm->io_base +
				    SEC_INTERFACE_USER_CTRL0_REG);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
			       SEC_INTERFACE_USER_CTRL0_REG);
		reg = readl_relaxed(qm->io_base +
				    SEC_INTERFACE_USER_CTRL1_REG);
		reg &= SEC_USER1_SMMU_MASK;
		if (qm->use_sva)
			reg |= SEC_USER1_SMMU_SVA;
		else
			reg |= SEC_USER1_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
			       SEC_INTERFACE_USER_CTRL1_REG);
	}
}

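/*
 * SVA prefetch exists only on HW V3 and later: opening clears the
 * disable bits in SEC_PREFETCH_CFG, then polls until the hardware
 * acknowledges by deasserting SEC_PREFETCH_DISABLE.
 */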
static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val &= SEC_PREFETCH_ENABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
					 val, !(val & SEC_PREFETCH_DISABLE),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void sec_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val |= SEC_PREFETCH_DISABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
					 val, !(val & SEC_SVA_DISABLE_READY),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}

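/*
 * HW V3 adds three gating controls: the top-level clock gate in
 * SEC_CONTROL_REG, dynamic gating in SEC_DYNAMIC_GATE_REG and core
 * auto gating in SEC_CORE_AUTO_GATE. All three are switched on here;
 * sec_disable_clock_gate() drops only the top-level gate, which is
 * done before memory init in sec_engine_init().
 */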
static void sec_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);

	val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
	val |= SEC_DYNAMIC_GATE_EN;
	writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);

	val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
	val |= SEC_CORE_AUTO_GATE_EN;
	writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
}

static void sec_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* Kunpeng920 needs to close clock gating */
	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
}

static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control before mem init */
	sec_disable_clock_gate(qm);

	writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "failed to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	sec_engine_sva_config(qm);

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);

	if (qm->ver < QM_HW_V3) {
		/* HW V2 enable sm4 extra mode, as ctr/ecb */
		writel_relaxed(SEC_BD_ERR_CHK_EN0,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);

		/* HW V2 enable sm4 xts mode multiple iv */
		writel_relaxed(SEC_BD_ERR_CHK_EN1,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
		writel_relaxed(SEC_BD_ERR_CHK_EN3,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
	}

	/* config endian */
	sec_set_endian(qm);

	sec_enable_clock_gate(qm);

	return 0;
}
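/*
 * One-shot hardware bring-up, also used as the err_ini hw_init
 * callback after reset: program the QM AXI user domain and cache
 * control registers, then hand off to sec_engine_init() for the
 * SEC-specific setup.
 */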
static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;

	/* clear sec dfx regs */
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}
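/*
 * Gate the AXI master out-of-order path: when enabling, the engine may
 * shut down its AXI interface on fatal errors, and on HW V3 and later
 * (where SEC_OOO_SHUTDOWN_SEL exists) the NFE mask selects which RAS
 * events trigger that shutdown; disabling clears both settings.
 */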
static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + SEC_CONTROL_REG);
	if (enable) {
		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
		val2 = SEC_RAS_NFE_ENB_MSK;
	} else {
		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 does not support hw error handling\n");
		return;
	}

	/* clear SEC hw error source if any */
	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, true);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, false);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}
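/*
 * Reads and writes on the debugfs files below are bracketed by
 * hisi_qm_get_dfx_access()/hisi_qm_put_dfx_access() so the device is
 * held resumed (runtime PM) while its registers are accessed.
 */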
static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_regs);

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
		debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
	if (qm->fun_type == QM_HW_PF && sec_regs)
		debugfs_create_file("diff_regs", 0444, tmp_d,
				    qm, &sec_diff_regs_fops);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
						  sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}
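/*
 * Debugfs layout: a per-device directory named after the PCI device is
 * created under the module root ("hisi_sec2"); the diff_regs state is
 * initialized first, then the common QM entries and the SEC-specific
 * files from sec_debug_init() are added beneath it.
 */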
static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;

	ret = hisi_qm_diff_regs_init(qm, sec_diff_regs,
				     ARRAY_SIZE(sec_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init SEC diff regs!\n");
		goto debugfs_remove;
	}

	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

	return 0;

failed_to_create:
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
debugfs_remove:
	debugfs_remove_recursive(sec_debugfs_root);
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));

	debugfs_remove_recursive(qm->debug.debug_root);
}

static int sec_show_last_regs_init(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	int i;

	debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs),
				    sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     sec_dfx_regs[i].offset);

	return 0;
}

static void sec_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void sec_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dumps last word of the debugging registers during controller reset */
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
		if (val != debug->last_words[i])
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 sec_dfx_regs[i].name, debug->last_words[i], val);
	}
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					((err_val) >> SEC_ECC_NUM) &
					SEC_ECC_MASK);
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->ce = QM_BASE_CE;
	err_info->fe = 0;
	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
	err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";
	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
			QM_ACC_WB_NOT_READY_TIMEOUT;
}
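/*
 * Error-handling callbacks invoked by the common QM core during RAS
 * interrupt handling and controller reset; SEC fills in only the hooks
 * it implements.
 */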
static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init		= sec_set_user_domain_and_cache,
	.hw_err_enable		= sec_hw_error_enable,
	.hw_err_disable		= sec_hw_error_disable,
	.get_dev_hw_err_status	= sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err		= sec_log_hw_error,
	.open_axi_master_ooo	= sec_open_axi_master_ooo,
	.open_sva_prefetch	= sec_open_sva_prefetch,
	.close_sva_prefetch	= sec_close_sva_prefetch,
	.show_last_dfx_regs	= sec_show_last_dfx_regs,
	.err_info_init		= sec_err_info_init,
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	qm->err_ini = &sec_err_ini;
	qm->err_ini->err_info_init(qm);

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	sec_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);
	ret = sec_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "cipher\ndigest\naead";
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * There is no way to get the qm configuration from inside a
		 * VM on v1 hardware, so the PF is forced to use
		 * SEC_PF_DEF_Q_NUM and only one VF is triggered on v1
		 * hardware. v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	return hisi_qm_init(qm);
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	u32 type_rate = SEC_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;

		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU opened, iommu domain type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	if (qm->qp_num >= ctx_q_num) {
		ret = hisi_qm_alg_register(qm, &sec_devices);
		if (ret < 0) {
			pr_err("Failed to register driver to crypto.\n");
			goto err_qm_stop;
		}
	} else {
		pci_warn(qm->pdev,
			 "Failed to use kernel mode, qp not enough!\n");
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_alg_unregister:
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_show_last_regs_uninit(qm);
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}
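/*
 * Teardown mirrors sec_probe() in reverse: pending tasks are drained
 * before the algorithms are unregistered, SR-IOV and debugfs are torn
 * down, the QM is stopped, and PF-only state is cleared last.
 */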
static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &sec_devices);
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);
	sec_show_last_regs_uninit(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct dev_pm_ops sec_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &sec_pm_ops,
};

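/*
 * Exported so other modules can obtain a handle to the SEC PF
 * pci_driver (used, for example, by the HiSilicon ACC VFIO live
 * migration driver).
 */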
struct pci_driver *hisi_sec_get_pf_driver(void)
{
	return &sec_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver);

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");