// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the HiSilicon SEC crypto accelerator units found on the
 * hip06 and hip07 SoCs.
 */
#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "sec_drv.h"

/* AXI read/write user attributes: force allocate, force no-allocate, disabled */
#define SEC_QUEUE_AR_FORCE_ALLOC 0
#define SEC_QUEUE_AR_FORCE_NOALLOC 1
#define SEC_QUEUE_AR_FORCE_DIS 2

#define SEC_QUEUE_AW_FORCE_ALLOC 0
#define SEC_QUEUE_AW_FORCE_NOALLOC 1
#define SEC_QUEUE_AW_FORCE_DIS 2

/* Clock and reset control registers in the SEC_COMMON address region */
#define SEC_ALGSUB_CLK_EN_REG 0x03b8
#define SEC_ALGSUB_CLK_DIS_REG 0x03bc
#define SEC_ALGSUB_CLK_ST_REG 0x535c
#define SEC_ALGSUB_RST_REQ_REG 0x0aa8
#define SEC_ALGSUB_RST_DREQ_REG 0x0aac
#define SEC_ALGSUB_RST_ST_REG 0x5a54
#define SEC_ALGSUB_RST_ST_IS_RST BIT(0)

#define SEC_ALGSUB_BUILD_RST_REQ_REG 0x0ab8
#define SEC_ALGSUB_BUILD_RST_DREQ_REG 0x0abc
#define SEC_ALGSUB_BUILD_RST_ST_REG 0x5a5c
#define SEC_ALGSUB_BUILD_RST_ST_IS_RST BIT(0)

#define SEC_SAA_BASE 0x00001000UL

/* Per-SAA engine control registers, offset from SEC_SAA_BASE */
#define SEC_SAA_CTRL_REG(x) ((x) * SEC_SAA_ADDR_SIZE)
#define SEC_SAA_CTRL_GET_QM_EN BIT(0)

#define SEC_ST_INTMSK1_REG 0x0200
#define SEC_ST_RINT1_REG 0x0400
#define SEC_ST_INTSTS1_REG 0x0600
#define SEC_BD_MNG_STAT_REG 0x0800
#define SEC_PARSING_STAT_REG 0x0804
#define SEC_LOAD_TIME_OUT_CNT_REG 0x0808
#define SEC_CORE_WORK_TIME_OUT_CNT_REG 0x080c
#define SEC_BACK_TIME_OUT_CNT_REG 0x0810
#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG 0x0814
#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG 0x0818
#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG 0x081c
#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG 0x0820
#define SEC_SAA_ACC_REG 0x083c
#define SEC_BD_NUM_CNT_IN_SEC_REG 0x0858
#define SEC_LOAD_WORK_TIME_CNT_REG 0x0860
#define SEC_CORE_WORK_WORK_TIME_CNT_REG 0x0864
#define SEC_BACK_WORK_TIME_CNT_REG 0x0868
#define SEC_SAA_IDLE_TIME_CNT_REG 0x086c
#define SEC_SAA_CLK_CNT_REG 0x0870

/* Global control registers in the SEC_SAA address region */
#define SEC_CLK_EN_REG 0x0000
#define SEC_CTRL_REG 0x0004

#define SEC_COMMON_CNT_CLR_CE_REG 0x0008
#define SEC_COMMON_CNT_CLR_CE_CLEAR BIT(0)
#define SEC_COMMON_CNT_CLR_CE_SNAP_EN BIT(1)

#define SEC_SECURE_CTRL_REG 0x000c
#define SEC_AXI_CACHE_CFG_REG 0x0010
#define SEC_AXI_QOS_CFG_REG 0x0014
#define SEC_IPV4_MASK_TABLE_REG 0x0020
#define SEC_IPV6_MASK_TABLE_X_REG(x) (0x0024 + (x) * 4)
#define SEC_FSM_MAX_CNT_REG 0x0064

#define SEC_CTRL2_REG 0x0068
#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M GENMASK(3, 0)
#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S 0
#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M GENMASK(6, 4)
#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S 4
#define SEC_CTRL2_CLK_GATE_EN BIT(7)
#define SEC_CTRL2_ENDIAN_BD BIT(8)
#define SEC_CTRL2_ENDIAN_BD_TYPE BIT(9)

#define SEC_CNT_PRECISION_CFG_REG 0x006c
#define SEC_DEBUG_BD_CFG_REG 0x0070
#define SEC_DEBUG_BD_CFG_WB_NORMAL BIT(0)
#define SEC_DEBUG_BD_CFG_WB_EN BIT(1)

#define SEC_Q_SIGHT_SEL 0x0074
#define SEC_Q_SIGHT_HIS_CLR 0x0078
#define SEC_Q_VMID_CFG_REG(q) (0x0100 + (q) * 4)
#define SEC_Q_WEIGHT_CFG_REG(q) (0x200 + (q) * 4)
#define SEC_STAT_CLR_REG 0x0a00
#define SEC_SAA_IDLE_CNT_CLR_REG 0x0a04
#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG 0x0b00
#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG 0x0b04
#define SEC_QM_BD_DFX_CFG_REG 0x0b08
#define SEC_QM_BD_DFX_RESULT_REG 0x0b0c
#define SEC_QM_BDID_DFX_RESULT_REG 0x0b10
#define SEC_QM_BD_DFIFO_STATUS_REG 0x0b14
#define SEC_QM_BD_DFX_CFG2_REG 0x0b1c
#define SEC_QM_BD_DFX_RESULT2_REG 0x0b20
#define SEC_QM_BD_IDFIFO_STATUS_REG 0x0b18
#define SEC_QM_BD_DFIFO_STATUS2_REG 0x0b28
#define SEC_QM_BD_IDFIFO_STATUS2_REG 0x0b2c

#define SEC_HASH_IPV4_MASK 0xfff00000
#define SEC_MAX_SAA_NUM 0xa
#define SEC_SAA_ADDR_SIZE 0x1000

/* Per-queue registers, relative to each queue's own register region */
#define SEC_Q_INIT_REG 0x0
#define SEC_Q_INIT_WO_STAT_CLEAR 0x2
#define SEC_Q_INIT_AND_STAT_CLEAR 0x3

#define SEC_Q_CFG_REG 0x8
#define SEC_Q_CFG_REORDER BIT(0)

#define SEC_Q_PROC_NUM_CFG_REG 0x10
#define SEC_QUEUE_ENB_REG 0x18

#define SEC_Q_DEPTH_CFG_REG 0x50
#define SEC_Q_DEPTH_CFG_DEPTH_M GENMASK(11, 0)
#define SEC_Q_DEPTH_CFG_DEPTH_S 0

#define SEC_Q_BASE_HADDR_REG 0x54
#define SEC_Q_BASE_LADDR_REG 0x58
#define SEC_Q_WR_PTR_REG 0x5c
#define SEC_Q_OUTORDER_BASE_HADDR_REG 0x60
#define SEC_Q_OUTORDER_BASE_LADDR_REG 0x64
#define SEC_Q_OUTORDER_RD_PTR_REG 0x68
#define SEC_Q_OT_TH_REG 0x6c

#define SEC_Q_ARUSER_CFG_REG 0x70
#define SEC_Q_ARUSER_CFG_FA BIT(0)
#define SEC_Q_ARUSER_CFG_FNA BIT(1)
#define SEC_Q_ARUSER_CFG_RINVLD BIT(2)
#define SEC_Q_ARUSER_CFG_PKG BIT(3)

#define SEC_Q_AWUSER_CFG_REG 0x74
#define SEC_Q_AWUSER_CFG_FA BIT(0)
#define SEC_Q_AWUSER_CFG_FNA BIT(1)
#define SEC_Q_AWUSER_CFG_PKG BIT(2)

#define SEC_Q_ERR_BASE_HADDR_REG 0x7c
#define SEC_Q_ERR_BASE_LADDR_REG 0x80
#define SEC_Q_CFG_VF_NUM_REG 0x84
#define SEC_Q_SOFT_PROC_PTR_REG 0x88
#define SEC_Q_FAIL_INT_MSK_REG 0x300
#define SEC_Q_FLOW_INT_MKS_REG 0x304
#define SEC_Q_FAIL_RINT_REG 0x400
#define SEC_Q_FLOW_RINT_REG 0x404
#define SEC_Q_FAIL_INT_STATUS_REG 0x500
#define SEC_Q_FLOW_INT_STATUS_REG 0x504
#define SEC_Q_STATUS_REG 0x600
#define SEC_Q_RD_PTR_REG 0x604
#define SEC_Q_PRO_PTR_REG 0x608
#define SEC_Q_OUTORDER_WR_PTR_REG 0x60c
#define SEC_Q_OT_CNT_STATUS_REG 0x610
#define SEC_Q_INORDER_BD_NUM_ST_REG 0x650
#define SEC_Q_INORDER_GET_FLAG_ST_REG 0x654
#define SEC_Q_INORDER_ADD_FLAG_ST_REG 0x658
#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG 0x65c
#define SEC_Q_RD_DONE_PTR_REG 0x660
#define SEC_Q_CPL_Q_BD_NUM_ST_REG 0x700
#define SEC_Q_CPL_Q_PTR_ST_REG 0x704
#define SEC_Q_CPL_Q_H_ADDR_ST_REG 0x708
#define SEC_Q_CPL_Q_L_ADDR_ST_REG 0x70c
#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG 0x710
#define SEC_Q_WRR_ID_CHECK_REG 0x714
#define SEC_Q_CPLQ_FULL_CHECK_REG 0x718
#define SEC_Q_SUCCESS_BD_CNT_REG 0x800
#define SEC_Q_FAIL_BD_CNT_REG 0x804
#define SEC_Q_GET_BD_CNT_REG 0x808
#define SEC_Q_IVLD_CNT_REG 0x80c
#define SEC_Q_BD_PROC_GET_CNT_REG 0x810
#define SEC_Q_BD_PROC_DONE_CNT_REG 0x814
#define SEC_Q_LAT_CLR_REG 0x850
#define SEC_Q_PKT_LAT_MAX_REG 0x854
#define SEC_Q_PKT_LAT_AVG_REG 0x858
#define SEC_Q_PKT_LAT_MIN_REG 0x85c
#define SEC_Q_ID_CLR_CFG_REG 0x900
#define SEC_Q_1ST_BD_ERR_ID_REG 0x904
#define SEC_Q_1ST_AUTH_FAIL_ID_REG 0x908
#define SEC_Q_1ST_RD_ERR_ID_REG 0x90c
#define SEC_Q_1ST_ECC2_ERR_ID_REG 0x910
#define SEC_Q_1ST_IVLD_ID_REG 0x914
#define SEC_Q_1ST_BD_WR_ERR_ID_REG 0x918
#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG 0x91c
#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG 0x920

struct sec_debug_bd_info {
#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M GENMASK(22, 0)
        u32 soft_err_check;
#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M GENMASK(9, 0)
        u32 hard_err_check;
        u32 icv_mac1st_word;
#define SEC_DEBUG_BD_INFO_GET_ID_M GENMASK(19, 0)
        u32 sec_get_id;

        /* Remaining words of the debug BD are reserved */
        u32 reserv_left[12];
};

struct sec_out_bd_info {
#define SEC_OUT_BD_INFO_Q_ID_M GENMASK(11, 0)
#define SEC_OUT_BD_INFO_ECC_2BIT_ERR BIT(14)
        u16 data;
};

#define SEC_MAX_DEVICES 8
static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
static DEFINE_MUTEX(sec_id_lock);

static int sec_queue_map_io(struct sec_queue *queue)
{
        struct device *dev = queue->dev_info->dev;
        struct resource *res;

        res = platform_get_resource(to_platform_device(dev),
                                    IORESOURCE_MEM,
                                    2 + queue->queue_id);
        if (!res) {
                dev_err(dev, "Failed to get queue %u memory resource\n",
                        queue->queue_id);
                return -ENOMEM;
        }
        queue->regs = ioremap(res->start, resource_size(res));
        if (!queue->regs)
                return -ENOMEM;

        return 0;
}

static void sec_queue_unmap_io(struct sec_queue *queue)
{
        iounmap(queue->regs);
}

static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
{
        void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (ar_pkg)
                regval |= SEC_Q_ARUSER_CFG_PKG;
        else
                regval &= ~SEC_Q_ARUSER_CFG_PKG;
        writel_relaxed(regval, addr);

        return 0;
}

static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
{
        void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
        u32 regval;

        /* The AW PKG attribute is always enabled, regardless of aw_pkg */
        regval = readl_relaxed(addr);
        regval |= SEC_Q_AWUSER_CFG_PKG;
        writel_relaxed(regval, addr);

        return 0;
}

static int sec_clk_en(struct sec_dev_info *info)
{
        void __iomem *base = info->regs[SEC_COMMON];
        u32 i = 0;

        writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
        do {
                usleep_range(1000, 10000);
                if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
                        return 0;
                i++;
        } while (i < 10);
        dev_err(info->dev, "sec clock enable fail!\n");

        return -EIO;
}

static int sec_clk_dis(struct sec_dev_info *info)
{
        void __iomem *base = info->regs[SEC_COMMON];
        u32 i = 0;

        writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
        do {
                usleep_range(1000, 10000);
                if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
                        return 0;
                i++;
        } while (i < 10);
        dev_err(info->dev, "sec clock disable fail!\n");

        return -EIO;
}

static int sec_reset_whole_module(struct sec_dev_info *info)
{
        void __iomem *base = info->regs[SEC_COMMON];
        bool is_reset, b_is_reset;
        u32 i = 0;

        writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
        writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
        while (1) {
                usleep_range(1000, 10000);
                is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
                        SEC_ALGSUB_RST_ST_IS_RST;
                b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
                        SEC_ALGSUB_BUILD_RST_ST_IS_RST;
                if (is_reset && b_is_reset)
                        break;
                i++;
                if (i > 10) {
                        dev_err(info->dev, "Reset req failed\n");
                        return -EIO;
                }
        }

        i = 0;
        writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
        writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
        while (1) {
                usleep_range(1000, 10000);
                is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
                        SEC_ALGSUB_RST_ST_IS_RST;
                b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
                        SEC_ALGSUB_BUILD_RST_ST_IS_RST;
                if (!is_reset && !b_is_reset)
                        break;

                i++;
                if (i > 10) {
                        dev_err(info->dev, "Reset dreq failed\n");
                        return -EIO;
                }
        }

        return 0;
}

static void sec_bd_endian_little(struct sec_dev_info *info)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
        writel_relaxed(regval, addr);
}

/*
 * Select the AXI cache attributes depending on whether an IOMMU is
 * translating for the device.
 */
static void sec_cache_config(struct sec_dev_info *info)
{
        struct iommu_domain *domain;
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;

        domain = iommu_get_domain_for_dev(info->dev);

        /* Only a paging (translating) domain needs the alternate configuration */
        if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
                writel_relaxed(0x44cf9e, addr);
        else
                writel_relaxed(0x4cfd9, addr);
}

static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
        regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
                SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
        writel_relaxed(regval, addr);
}

static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
        regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
                SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
        writel_relaxed(regval, addr);
}

static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (clkgate)
                regval |= SEC_CTRL2_CLK_GATE_EN;
        else
                regval &= ~SEC_CTRL2_CLK_GATE_EN;
        writel_relaxed(regval, addr);
}

static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (clr_ce)
                regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
        else
                regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
        writel_relaxed(regval, addr);
}

static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (snap_en)
                regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
        else
                regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
        writel_relaxed(regval, addr);
}

static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
{
        void __iomem *base = info->regs[SEC_SAA];
        int i;

        for (i = 0; i < 10; i++)
                writel_relaxed(hash_mask[0],
                               base + SEC_IPV6_MASK_TABLE_X_REG(i));
}

static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
{
        if (hash_mask & SEC_HASH_IPV4_MASK) {
                dev_err(info->dev, "Invalid SEC IPv4 hash mask\n");
                return -EINVAL;
        }

        writel_relaxed(hash_mask,
                       info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);

        return 0;
}

static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);

        regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;

        if (cfg)
                regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
        else
                regval |= SEC_DEBUG_BD_CFG_WB_EN;

        writel_relaxed(regval, addr);
}

static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
                SEC_SAA_CTRL_REG(saa_indx);
        u32 regval;

        regval = readl_relaxed(addr);
        if (en)
                regval |= SEC_SAA_CTRL_GET_QM_EN;
        else
                regval &= ~SEC_SAA_CTRL_GET_QM_EN;
        writel_relaxed(regval, addr);
}

static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
                             u32 saa_int_mask)
{
        writel_relaxed(saa_int_mask,
                       info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
                       saa_indx * SEC_SAA_ADDR_SIZE);
}

static void sec_streamid(struct sec_dev_info *info, int i)
{
#define SEC_SID 0x600
#define SEC_VMID 0

        writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
                       info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
}

static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
{
        void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (alloc == SEC_QUEUE_AR_FORCE_ALLOC) {
                regval |= SEC_Q_ARUSER_CFG_FA;
                regval &= ~SEC_Q_ARUSER_CFG_FNA;
        } else {
                regval &= ~SEC_Q_ARUSER_CFG_FA;
                regval |= SEC_Q_ARUSER_CFG_FNA;
        }

        writel_relaxed(regval, addr);
}

static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
{
        void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (alloc == SEC_QUEUE_AW_FORCE_ALLOC) {
                regval |= SEC_Q_AWUSER_CFG_FA;
                regval &= ~SEC_Q_AWUSER_CFG_FNA;
        } else {
                regval &= ~SEC_Q_AWUSER_CFG_FA;
                regval |= SEC_Q_AWUSER_CFG_FNA;
        }

        writel_relaxed(regval, addr);
}

static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
{
        void __iomem *base = queue->regs;
        u32 regval;

        regval = readl_relaxed(base + SEC_Q_CFG_REG);
        if (reorder)
                regval |= SEC_Q_CFG_REORDER;
        else
                regval &= ~SEC_Q_CFG_REORDER;
        writel_relaxed(regval, base + SEC_Q_CFG_REG);
}

static void sec_queue_depth(struct sec_queue *queue, u32 depth)
{
        void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
        regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;

        writel_relaxed(regval, addr);
}

static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
{
        writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
        writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
}

static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
{
        writel_relaxed(upper_32_bits(addr),
                       queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
        writel_relaxed(lower_32_bits(addr),
                       queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
}

static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
{
        writel_relaxed(upper_32_bits(addr),
                       queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
        writel_relaxed(lower_32_bits(addr),
                       queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
}

static void sec_queue_irq_disable(struct sec_queue *queue)
{
        writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
}

static void sec_queue_irq_enable(struct sec_queue *queue)
{
        writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
}

static void sec_queue_abn_irq_disable(struct sec_queue *queue)
{
        writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
}

static void sec_queue_stop(struct sec_queue *queue)
{
        disable_irq(queue->task_irq);
        sec_queue_irq_disable(queue);
        writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
}

static void sec_queue_start(struct sec_queue *queue)
{
        sec_queue_irq_enable(queue);
        enable_irq(queue->task_irq);
        queue->expected = 0;
        writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
        writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
}

static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
{
        int i;

        mutex_lock(&info->dev_lock);

        /* Hand out the first queue that is not currently in use */
        for (i = 0; i < SEC_Q_NUM; i++)
                if (!info->queues[i].in_use) {
                        info->queues[i].in_use = true;
                        info->queues_in_use++;
                        mutex_unlock(&info->dev_lock);

                        return &info->queues[i];
                }
        mutex_unlock(&info->dev_lock);

        return ERR_PTR(-ENODEV);
}

static int sec_queue_free(struct sec_queue *queue)
{
        struct sec_dev_info *info = queue->dev_info;

        if (queue->queue_id >= SEC_Q_NUM) {
                dev_err(info->dev, "No queue %u\n", queue->queue_id);
                return -ENODEV;
        }

        if (!queue->in_use) {
                dev_err(info->dev, "Queue %u is idle\n", queue->queue_id);
                return -ENODEV;
        }

        mutex_lock(&info->dev_lock);
        queue->in_use = false;
        info->queues_in_use--;
        mutex_unlock(&info->dev_lock);

        return 0;
}

static irqreturn_t sec_isr_handle_th(int irq, void *q)
{
        sec_queue_irq_disable(q);
        return IRQ_WAKE_THREAD;
}

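/*
 * Threaded interrupt handler: walk the out-of-order completion queue, mark
 * completed BDs, and deliver them to their callbacks in submission order,
 * starting from the next expected element.
 */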
static irqreturn_t sec_isr_handle(int irq, void *q)
{
        struct sec_queue *queue = q;
        struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
        struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
        struct sec_out_bd_info *outorder_msg;
        struct sec_bd_info *msg;
        u32 ooo_read, ooo_write;
        void __iomem *base = queue->regs;
        int q_id;

        ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
        ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
        outorder_msg = cq_ring->vaddr + ooo_read;
        q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
        msg = msg_ring->vaddr + q_id;

        while ((ooo_write != ooo_read) && msg->w0 & SEC_BD_W0_DONE) {
                /*
                 * Record this completion before running callbacks so that
                 * elements completing out of order are not lost and can be
                 * delivered in order below.
                 */
                set_bit(q_id, queue->unprocessed);
                if (q_id == queue->expected)
                        while (test_bit(queue->expected, queue->unprocessed)) {
                                clear_bit(queue->expected, queue->unprocessed);
                                msg = msg_ring->vaddr + queue->expected;
                                msg->w0 &= ~SEC_BD_W0_DONE;
                                msg_ring->callback(msg,
                                                queue->shadow[queue->expected]);
                                queue->shadow[queue->expected] = NULL;
                                queue->expected = (queue->expected + 1) %
                                        SEC_QUEUE_LEN;
                                atomic_dec(&msg_ring->used);
                        }

                ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
                writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
                ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
                outorder_msg = cq_ring->vaddr + ooo_read;
                q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
                msg = msg_ring->vaddr + q_id;
        }

        sec_queue_irq_enable(queue);

        return IRQ_HANDLED;
}

static int sec_queue_irq_init(struct sec_queue *queue)
{
        struct sec_dev_info *info = queue->dev_info;
        int irq = queue->task_irq;
        int ret;

        ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
                                   IRQF_TRIGGER_RISING, queue->name, queue);
        if (ret) {
                dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
                return ret;
        }
        disable_irq(irq);

        return 0;
}

static int sec_queue_irq_uninit(struct sec_queue *queue)
{
        free_irq(queue->task_irq, queue);

        return 0;
}

static struct sec_dev_info *sec_device_get(void)
{
        struct sec_dev_info *sec_dev = NULL;
        struct sec_dev_info *this_sec_dev;
        int least_busy_n = SEC_Q_NUM + 1;
        int i;

        /* Pick the device with the fewest queues currently in use */
        for (i = 0; i < SEC_MAX_DEVICES; i++) {
                this_sec_dev = sec_devices[i];
                if (this_sec_dev &&
                    this_sec_dev->queues_in_use < least_busy_n) {
                        least_busy_n = this_sec_dev->queues_in_use;
                        sec_dev = this_sec_dev;
                }
        }

        return sec_dev;
}

static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
{
        struct sec_queue *queue;

        queue = sec_alloc_queue(info);
        if (IS_ERR(queue)) {
                dev_err(info->dev, "alloc sec queue failed! %ld\n",
                        PTR_ERR(queue));
                return queue;
        }

        sec_queue_start(queue);

        return queue;
}

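/**
 * sec_queue_alloc_start_safe - allocate and start a hardware queue
 *
 * Selects the least busy SEC device, claims one of its idle queues and
 * starts it.  Returns an ERR_PTR on failure.
 */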
struct sec_queue *sec_queue_alloc_start_safe(void)
{
        struct sec_dev_info *info;
        struct sec_queue *queue = ERR_PTR(-ENODEV);

        mutex_lock(&sec_id_lock);
        info = sec_device_get();
        if (!info)
                goto unlock;

        queue = sec_queue_alloc_start(info);

unlock:
        mutex_unlock(&sec_id_lock);

        return queue;
}

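/**
 * sec_queue_stop_release() - stop a hardware queue and release it
 * @queue: The queue to stop and free up for reuse.
 *
 * Returns an error if freeing the queue fails.
 */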
int sec_queue_stop_release(struct sec_queue *queue)
{
        struct device *dev = queue->dev_info->dev;
        int ret;

        sec_queue_stop(queue);

        ret = sec_queue_free(queue);
        if (ret)
                dev_err(dev, "Releasing queue failed %d\n", ret);

        return ret;
}

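/**
 * sec_queue_empty() - is this hardware queue currently empty?
 * @queue: The queue to test.
 *
 * Returns true if there are no elements outstanding in the command ring.
 */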
bool sec_queue_empty(struct sec_queue *queue)
{
        struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

        return !atomic_read(&msg_ring->used);
}

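/**
 * sec_queue_send() - queue up a single operation in the hardware queue
 * @queue: The queue in which to put the message.
 * @msg: The message.
 * @ctx: Context passed back to the completion callback for this message.
 *
 * Returns -EAGAIN if the queue is currently full.
 */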
int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
{
        struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
        void __iomem *base = queue->regs;
        u32 write, read;

        mutex_lock(&msg_ring->lock);
        read = readl(base + SEC_Q_RD_PTR_REG);
        write = readl(base + SEC_Q_WR_PTR_REG);
        if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
                mutex_unlock(&msg_ring->lock);
                return -EAGAIN;
        }
        memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
        queue->shadow[write] = ctx;
        write = (write + 1) % SEC_QUEUE_LEN;

        /* Ensure the BD contents are visible before advancing the write pointer */
        wmb();
        writel(write, base + SEC_Q_WR_PTR_REG);

        atomic_inc(&msg_ring->used);
        mutex_unlock(&msg_ring->lock);

        return 0;
}

bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
{
        struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

        return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
}

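/* Put a queue's hardware registers into a known initial state */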
static void sec_queue_hw_init(struct sec_queue *queue)
{
        sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FORCE_NOALLOC);
        sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FORCE_NOALLOC);
        sec_queue_ar_pkgattr(queue, 1);
        sec_queue_aw_pkgattr(queue, 1);

        /* Enable completion reordering (out-of-order completion queue) */
        sec_queue_reorder(queue, true);

        writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);

        sec_queue_depth(queue, SEC_QUEUE_LEN - 1);

        sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);

        sec_queue_outorder_addr(queue, queue->ring_cq.paddr);

        sec_queue_errbase_addr(queue, queue->ring_db.paddr);

        writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);

        sec_queue_abn_irq_disable(queue);
        sec_queue_irq_disable(queue);
        writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
}

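/* One-time global configuration of the SEC hardware, done at probe time */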
static int sec_hw_init(struct sec_dev_info *info)
{
        struct iommu_domain *domain;
        u32 sec_ipv4_mask = 0;
        u32 sec_ipv6_mask[10] = {};
        u32 i, ret;

        domain = iommu_get_domain_for_dev(info->dev);

        /*
         * Use only half of the SAA processing engines when an IOMMU paging
         * domain is translating for the device, otherwise enable all of them.
         */
        if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
                info->num_saas = 5;
        else
                info->num_saas = 10;

        writel_relaxed(GENMASK(info->num_saas - 1, 0),
                       info->regs[SEC_SAA] + SEC_CLK_EN_REG);

        /* BDs are little endian */
        sec_bd_endian_little(info);

        sec_cache_config(info);

        /* AXI data read/write outstanding configuration */
        sec_data_axiwr_otsd_cfg(info, 0x7);
        sec_data_axird_otsd_cfg(info, 0x7);

        /* Enable clock gating */
        sec_clk_gate_en(info, true);

        /* Do not clear the common counters on read */
        sec_comm_cnt_cfg(info, false);

        /* Disable counter snapshotting */
        sec_commsnap_en(info, false);

        writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);

        ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
        if (ret) {
                dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
                return -EIO;
        }

        sec_ipv6_hashmask(info, sec_ipv6_mask);

        /* Configure debug BD write-back */
        sec_set_dbg_bd_cfg(info, 0);

        if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
                for (i = 0; i < SEC_Q_NUM; i++) {
                        sec_streamid(info, i);
                        /* Give all queues the same weight */
                        writel_relaxed(0x3f,
                                       info->regs[SEC_SAA] +
                                       SEC_Q_WEIGHT_CFG_REG(i));
                }
        }

        for (i = 0; i < info->num_saas; i++) {
                sec_saa_getqm_en(info, i, 1);
                sec_saa_int_mask(info, i, 0);
        }

        return 0;
}

static void sec_hw_exit(struct sec_dev_info *info)
{
        int i;

        for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
                sec_saa_int_mask(info, i, (u32)~0);
                sec_saa_getqm_en(info, i, 0);
        }
}

static void sec_queue_base_init(struct sec_dev_info *info,
                                struct sec_queue *queue, int queue_id)
{
        queue->dev_info = info;
        queue->queue_id = queue_id;
        snprintf(queue->name, sizeof(queue->name),
                 "%s_%d", dev_name(info->dev), queue->queue_id);
}

static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
{
        struct resource *res;
        int i;

        for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);

                if (!res) {
                        dev_err(info->dev, "Memory resource %d not found\n", i);
                        return -EINVAL;
                }

                info->regs[i] = devm_ioremap(info->dev, res->start,
                                             resource_size(res));
                if (!info->regs[i]) {
                        dev_err(info->dev,
                                "Memory resource %d could not be remapped\n",
                                i);
                        return -EINVAL;
                }
        }

        return 0;
}

static int sec_base_init(struct sec_dev_info *info,
                         struct platform_device *pdev)
{
        int ret;

        ret = sec_map_io(info, pdev);
        if (ret)
                return ret;

        ret = sec_clk_en(info);
        if (ret)
                return ret;

        ret = sec_reset_whole_module(info);
        if (ret)
                goto sec_clk_disable;

        ret = sec_hw_init(info);
        if (ret)
                goto sec_clk_disable;

        return 0;

sec_clk_disable:
        sec_clk_dis(info);

        return ret;
}

static void sec_base_exit(struct sec_dev_info *info)
{
        sec_hw_exit(info);
        sec_clk_dis(info);
}

#define SEC_Q_CMD_SIZE \
        round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
#define SEC_Q_CQ_SIZE \
        round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
#define SEC_Q_DB_SIZE \
        round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)

static int sec_queue_res_cfg(struct sec_queue *queue)
{
        struct device *dev = queue->dev_info->dev;
        struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
        struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
        struct sec_queue_ring_db *ring_db = &queue->ring_db;
        int ret;

        ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
                                             &ring_cmd->paddr, GFP_KERNEL);
        if (!ring_cmd->vaddr)
                return -ENOMEM;

        atomic_set(&ring_cmd->used, 0);
        mutex_init(&ring_cmd->lock);
        ring_cmd->callback = sec_alg_callback;

        ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
                                            &ring_cq->paddr, GFP_KERNEL);
        if (!ring_cq->vaddr) {
                ret = -ENOMEM;
                goto err_free_ring_cmd;
        }

        ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
                                            &ring_db->paddr, GFP_KERNEL);
        if (!ring_db->vaddr) {
                ret = -ENOMEM;
                goto err_free_ring_cq;
        }
        queue->task_irq = platform_get_irq(to_platform_device(dev),
                                           queue->queue_id * 2 + 1);
        if (queue->task_irq <= 0) {
                ret = -EINVAL;
                goto err_free_ring_db;
        }

        return 0;

err_free_ring_db:
        dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
                          queue->ring_db.paddr);
err_free_ring_cq:
        dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
                          queue->ring_cq.paddr);
err_free_ring_cmd:
        dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
                          queue->ring_cmd.paddr);

        return ret;
}

static void sec_queue_free_ring_pages(struct sec_queue *queue)
{
        struct device *dev = queue->dev_info->dev;

        dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
                          queue->ring_db.paddr);
        dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
                          queue->ring_cq.paddr);
        dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
                          queue->ring_cmd.paddr);
}

static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
                            int queue_id)
{
        int ret;

        sec_queue_base_init(info, queue, queue_id);

        ret = sec_queue_res_cfg(queue);
        if (ret)
                return ret;

        ret = sec_queue_map_io(queue);
        if (ret) {
                dev_err(info->dev, "Queue map failed %d\n", ret);
                sec_queue_free_ring_pages(queue);
                return ret;
        }

        sec_queue_hw_init(queue);

        return 0;
}

static void sec_queue_unconfig(struct sec_dev_info *info,
                               struct sec_queue *queue)
{
        sec_queue_unmap_io(queue);
        sec_queue_free_ring_pages(queue);
}

static int sec_id_alloc(struct sec_dev_info *info)
{
        int ret = 0;
        int i;

        mutex_lock(&sec_id_lock);

        for (i = 0; i < SEC_MAX_DEVICES; i++)
                if (!sec_devices[i])
                        break;
        if (i == SEC_MAX_DEVICES) {
                ret = -ENOMEM;
                goto unlock;
        }
        info->sec_id = i;
        sec_devices[info->sec_id] = info;

unlock:
        mutex_unlock(&sec_id_lock);

        return ret;
}

static void sec_id_free(struct sec_dev_info *info)
{
        mutex_lock(&sec_id_lock);
        sec_devices[info->sec_id] = NULL;
        mutex_unlock(&sec_id_lock);
}

static int sec_probe(struct platform_device *pdev)
{
        struct sec_dev_info *info;
        struct device *dev = &pdev->dev;
        int i, j;
        int ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(dev, "Failed to set 64 bit dma mask %d\n", ret);
                return -ENODEV;
        }

        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->dev = dev;
        mutex_init(&info->dev_lock);

        info->hw_sgl_pool = dmam_pool_create("sgl", dev,
                                             sizeof(struct sec_hw_sgl), 64, 0);
        if (!info->hw_sgl_pool) {
                dev_err(dev, "Failed to create sec sgl dma pool\n");
                return -ENOMEM;
        }

        ret = sec_base_init(info, pdev);
        if (ret) {
                dev_err(dev, "Base initialization fail! %d\n", ret);
                return ret;
        }

        for (i = 0; i < SEC_Q_NUM; i++) {
                ret = sec_queue_config(info, &info->queues[i], i);
                if (ret)
                        goto queues_unconfig;

                ret = sec_queue_irq_init(&info->queues[i]);
                if (ret) {
                        sec_queue_unconfig(info, &info->queues[i]);
                        goto queues_unconfig;
                }
        }

        ret = sec_algs_register();
        if (ret) {
                dev_err(dev, "Failed to register algorithms with crypto %d\n",
                        ret);
                goto queues_unconfig;
        }

        platform_set_drvdata(pdev, info);

        ret = sec_id_alloc(info);
        if (ret)
                goto algs_unregister;

        return 0;

algs_unregister:
        sec_algs_unregister();
queues_unconfig:
        for (j = i - 1; j >= 0; j--) {
                sec_queue_irq_uninit(&info->queues[j]);
                sec_queue_unconfig(info, &info->queues[j]);
        }
        sec_base_exit(info);

        return ret;
}

static int sec_remove(struct platform_device *pdev)
{
        struct sec_dev_info *info = platform_get_drvdata(pdev);
        int i;

        /* Stop handing out queues from this device before tearing it down */
        sec_id_free(info);

        sec_algs_unregister();

        for (i = 0; i < SEC_Q_NUM; i++) {
                sec_queue_irq_uninit(&info->queues[i]);
                sec_queue_unconfig(info, &info->queues[i]);
        }

        sec_base_exit(info);

        return 0;
}

static const __maybe_unused struct of_device_id sec_match[] = {
        { .compatible = "hisilicon,hip06-sec" },
        { .compatible = "hisilicon,hip07-sec" },
        {}
};
MODULE_DEVICE_TABLE(of, sec_match);

static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
        { "HISI02C1", 0 },
        { }
};
MODULE_DEVICE_TABLE(acpi, sec_acpi_match);

static struct platform_driver sec_driver = {
        .probe = sec_probe,
        .remove = sec_remove,
        .driver = {
                .name = "hisi_sec_platform_driver",
                .of_match_table = sec_match,
                .acpi_match_table = ACPI_PTR(sec_acpi_match),
        },
};
module_platform_driver(sec_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HiSilicon Security Accelerators");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");