0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/miscdevice.h>
0014 #include <linux/io.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/kernel.h>
0017 #include <linux/module.h>
0018 #include <linux/of_platform.h>
0019 #include <linux/poll.h>
0020 #include <linux/slab.h>
0021 #include <linux/clk.h>
0022 #include <linux/compat.h>
0023 #include <linux/highmem.h>
0024
0025 #include <uapi/misc/xilinx_sdfec.h>
0026
0027 #define DEV_NAME_LEN 12
0028
0029 static DEFINE_IDA(dev_nrs);
0030
0031
0032
0033 #define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
0034
0035
0036 #define XSDFEC_ACTIVE_ADDR (0x8)
0037 #define XSDFEC_IS_ACTIVITY_SET (0x1)
0038
0039
0040 #define XSDFEC_AXIS_WIDTH_ADDR (0xC)
0041 #define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
0042 #define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
0043 #define XSDFEC_AXIS_DIN_WORDS_LSB (2)
0044 #define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
0045
0046
0047 #define XSDFEC_AXIS_ENABLE_ADDR (0x10)
0048 #define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
0049 #define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
0050 #define XSDFEC_AXIS_ENABLE_MASK \
0051 (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
0052
0053
0054 #define XSDFEC_FEC_CODE_ADDR (0x14)
0055
0056
0057 #define XSDFEC_ORDER_ADDR (0x18)
0058
0059
0060 #define XSDFEC_ISR_ADDR (0x1C)
0061
0062 #define XSDFEC_ISR_MASK (0x3F)
0063
0064
0065 #define XSDFEC_IER_ADDR (0x20)
0066
0067 #define XSDFEC_IDR_ADDR (0x24)
0068
0069 #define XSDFEC_IMR_ADDR (0x28)
0070
0071
0072 #define XSDFEC_ECC_ISR_ADDR (0x2C)
0073
0074 #define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
0075
0076 #define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
0077
0078 #define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
0079
0080 #define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
0081
0082 #define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
0083
0084 #define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
0085
0086 #define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
0087
0088 #define XSDFEC_PL_INIT_ECC_ISR_MASK \
0089 (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
0090
0091 #define XSDFEC_ALL_ECC_ISR_MASK \
0092 (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
0093
0094 #define XSDFEC_ALL_ECC_ISR_SBE_MASK \
0095 (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
0096
0097 #define XSDFEC_ALL_ECC_ISR_MBE_MASK \
0098 (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
0099
0100
0101 #define XSDFEC_ECC_IER_ADDR (0x30)
0102
0103 #define XSDFEC_ECC_IDR_ADDR (0x34)
0104
0105 #define XSDFEC_ECC_IMR_ADDR (0x38)
0106
0107
0108 #define XSDFEC_BYPASS_ADDR (0x3C)
0109
0110
0111 #define XSDFEC_TURBO_ADDR (0x100)
0112 #define XSDFEC_TURBO_SCALE_MASK (0xFFF)
0113 #define XSDFEC_TURBO_SCALE_BIT_POS (8)
0114 #define XSDFEC_TURBO_SCALE_MAX (15)
0115
0116
0117 #define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
0118 #define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
0119 #define XSDFEC_REG0_N_MIN (4)
0120 #define XSDFEC_REG0_N_MAX (32768)
0121 #define XSDFEC_REG0_N_MUL_P (256)
0122 #define XSDFEC_REG0_N_LSB (0)
0123 #define XSDFEC_REG0_K_MIN (2)
0124 #define XSDFEC_REG0_K_MAX (32766)
0125 #define XSDFEC_REG0_K_MUL_P (256)
0126 #define XSDFEC_REG0_K_LSB (16)
0127
0128
0129 #define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
0130 #define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
0131 #define XSDFEC_REG1_PSIZE_MIN (2)
0132 #define XSDFEC_REG1_PSIZE_MAX (512)
0133 #define XSDFEC_REG1_NO_PACKING_MASK (0x400)
0134 #define XSDFEC_REG1_NO_PACKING_LSB (10)
0135 #define XSDFEC_REG1_NM_MASK (0xFF800)
0136 #define XSDFEC_REG1_NM_LSB (11)
0137 #define XSDFEC_REG1_BYPASS_MASK (0x100000)
0138
0139
0140 #define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
0141 #define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
0142 #define XSDFEC_REG2_NLAYERS_MIN (1)
0143 #define XSDFEC_REG2_NLAYERS_MAX (256)
0144 #define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
0145 #define XSDFEC_REG2_NMQC_LSB (9)
0146 #define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
0147 #define XSDFEC_REG2_NORM_TYPE_LSB (20)
0148 #define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
0149 #define XSDFEC_REG2_SPEICAL_QC_LSB (21)
0150 #define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
0151 #define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
0152 #define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
0153 #define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
0154
0155
0156 #define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
0157 #define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
0158 #define XSDFEC_REG3_LA_OFF_LSB (8)
0159 #define XSDFEC_REG3_QC_OFF_LSB (16)
0160
0161 #define XSDFEC_LDPC_REG_JUMP (0x10)
0162 #define XSDFEC_REG_WIDTH_JUMP (4)
0163
0164
0165 #define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
/**
 * struct xsdfec_clks - clocks obtained for the SD-FEC block
 * @core_clk: main processing clock for the core ("core_clk")
 * @axi_clk: AXI4-Lite memory-mapped interface clock ("s_axi_aclk")
 * @din_words_clk: DIN words AXIS interface clock (optional)
 * @din_clk: DIN AXIS interface clock (optional)
 * @dout_clk: DOUT AXIS interface clock (optional)
 * @dout_words_clk: DOUT words AXIS interface clock (optional)
 * @ctrl_clk: control AXIS interface clock (optional)
 * @status_clk: status AXIS interface clock (optional)
 *
 * Clock names above follow the devm_clk_get() identifiers used by
 * xsdfec_clk_init(); the optional ones may be NULL when absent.
 */
struct xsdfec_clks {
	struct clk *core_clk;
	struct clk *axi_clk;
	struct clk *din_words_clk;
	struct clk *din_clk;
	struct clk *dout_clk;
	struct clk *dout_words_clk;
	struct clk *ctrl_clk;
	struct clk *status_clk;
};
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
/**
 * struct xsdfec_dev - driver state for one SD-FEC instance
 * @miscdev: misc device registration handle
 * @clks: clocks required by this instance
 * @waitq: wait queue signalled by the IRQ thread, used by poll()
 * @config: cached driver view of the hardware configuration
 * @dev_name: device instance name (up to DEV_NAME_LEN bytes)
 * @flags: saved IRQ flags for @error_data_lock (stored here by convention
 *         throughout this driver)
 * @regs: ioremapped base of the register space
 * @dev: backing struct device, used for logging
 * @state: driver state machine value (started/stopped/needs-reset/...)
 * @error_data_lock: protects @state, the error counters and the
 *                   state/stats "updated" flags below
 * @dev_id: instance number allocated from the dev_nrs IDA
 * @isr_err_count: count of general (non-ECC) interrupt error bits seen
 * @cecc_count: count of correctable (single-bit) ECC errors seen
 * @uecc_count: count of uncorrectable (multi-bit) ECC errors seen
 * @irq: IRQ line number serviced by xsdfec_irq_thread()
 * @state_updated: set by the IRQ thread when @state changed; cleared by
 *                 xsdfec_get_status()
 * @stats_updated: set by the IRQ thread when counters changed; cleared by
 *                 xsdfec_get_stats()
 * @intr_enabled: whether interrupts are currently enabled
 */
struct xsdfec_dev {
	struct miscdevice miscdev;
	struct xsdfec_clks clks;
	wait_queue_head_t waitq;
	struct xsdfec_config config;
	char dev_name[DEV_NAME_LEN];
	unsigned long flags;
	void __iomem *regs;
	struct device *dev;
	enum xsdfec_state state;
	/* Spinlock to protect state_updated and stats_updated */
	spinlock_t error_data_lock;
	int dev_id;
	u32 isr_err_count;
	u32 cecc_count;
	u32 uecc_count;
	int irq;
	bool state_updated;
	bool stats_updated;
	bool intr_enabled;
};
0233
/* Write @value to the SD-FEC MMIO register at byte offset @addr. */
static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
				   u32 value)
{
	dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
	iowrite32(value, xsdfec->regs + addr);
}
0240
/* Read and return the SD-FEC MMIO register at byte offset @addr. */
static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
	u32 rval;

	rval = ioread32(xsdfec->regs + addr);
	dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
	return rval;
}
0249
0250 static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
0251 u32 reg_offset, u32 bit_num,
0252 char *config_value)
0253 {
0254 u32 reg_val;
0255 u32 bit_mask = 1 << bit_num;
0256
0257 reg_val = xsdfec_regread(xsdfec, reg_offset);
0258 *config_value = (reg_val & bit_mask) > 0;
0259 }
0260
/*
 * Refresh the cached driver configuration from live register state:
 * order, bypass, code write-protect, both interrupt-enable states and
 * whether the core is currently started.
 */
static void update_config_from_hw(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	bool sdfec_started;

	/* Update the Order from hardware */
	reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
	xsdfec->config.order = reg_value;

	/* Bypass is bit 0 of the bypass register */
	update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
				    0,
				    &xsdfec->config.bypass);

	/* Code write-protect is bit 0 of its register */
	update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
				    0,
				    &xsdfec->config.code_wr_protect);

	/* IMR bits set mean the interrupt is masked (disabled) */
	reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
	xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
	xsdfec->config.irq.enable_ecc_isr =
		(reg_value & XSDFEC_ECC_ISR_MASK) > 0;

	/* Core counts as started when any DIN AXIS stream is enabled */
	reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
	if (sdfec_started)
		xsdfec->state = XSDFEC_STARTED;
	else
		xsdfec->state = XSDFEC_STOPPED;
}
0292
/*
 * XSDFEC_GET_STATUS ioctl: copy driver state and hardware activity flag to
 * user space.  Reading the status also clears state_updated (consumed by
 * poll()).
 */
static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_status status;
	int err;

	memset(&status, 0, sizeof(status));
	/* IRQ flags are stored in xsdfec->flags by driver-wide convention */
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	status.state = xsdfec->state;
	xsdfec->state_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
			   XSDFEC_IS_ACTIVITY_SET);

	err = copy_to_user(arg, &status, sizeof(status));
	if (err)
		err = -EFAULT;

	return err;
}
0312
0313 static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
0314 {
0315 int err;
0316
0317 err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
0318 if (err)
0319 err = -EFAULT;
0320
0321 return err;
0322 }
0323
/*
 * Enable or disable the general SD-FEC interrupts via IER/IDR, then read
 * IMR back to confirm the change took effect (IMR bit set == masked).
 * Returns 0 on success, -EIO if the read-back does not match.
 */
static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if (mask_read & XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling irq with IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC disabling irq with IDR failed");
			return -EIO;
		}
	}
	return 0;
}
0349
/*
 * Enable or disable the ECC interrupts (both the core ECC bits and the
 * PL-initialisation ECC bits) via ECC IER/IDR, confirming via ECC IMR.
 * On disable, either the core-ECC or the PL-init-ECC mask pattern alone is
 * accepted as a valid masked state.  Returns 0 or -EIO.
 */
static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling ECC irq with ECC IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_ECC_ISR_MASK) ||
		      ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_PL_INIT_ECC_ISR_MASK))) {
			dev_dbg(xsdfec->dev,
				"SDFEC disable ECC irq with ECC IDR failed");
			return -EIO;
		}
	}
	return 0;
}
0380
/*
 * XSDFEC_SET_IRQ ioctl: apply the user-requested interrupt enables.  Both
 * the general ISR and ECC ISR enables are attempted independently; the
 * cached config is only updated for the ones that succeeded, and -EIO is
 * returned if either failed.
 */
static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_irq irq;
	int err;
	int isr_err;
	int ecc_err;

	err = copy_from_user(&irq, arg, sizeof(irq));
	if (err)
		return -EFAULT;

	/* Setup tlast related IRQ */
	isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
	if (!isr_err)
		xsdfec->config.irq.enable_isr = irq.enable_isr;

	/* Setup ECC related IRQ */
	ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
	if (!ecc_err)
		xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;

	if (isr_err < 0 || ecc_err < 0)
		err = -EIO;

	return err;
}
0407
/*
 * XSDFEC_SET_TURBO ioctl: validate and program the turbo decode algorithm
 * and scale factor.  Rejected with -EIO when the core is configured for
 * LDPC, since the turbo register is not applicable then.
 */
static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_turbo turbo;
	int err;
	u32 turbo_write;

	err = copy_from_user(&turbo, arg, sizeof(turbo));
	if (err)
		return -EFAULT;

	if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
		return -EINVAL;

	if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
		return -EINVAL;

	/* Check to see what device tree says about the FEC codes */
	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	/* Pack scale into bits [19:8] and the algorithm into the low bits */
	turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
		       << XSDFEC_TURBO_SCALE_BIT_POS) |
		      turbo.alg;
	xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
	return err;
}
0434
/*
 * XSDFEC_GET_TURBO ioctl: read back the turbo register and return the
 * decoded scale and algorithm fields to user space.  -EIO when the core
 * is configured for LDPC.
 */
static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	u32 reg_value;
	struct xsdfec_turbo turbo_params;
	int err;

	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	memset(&turbo_params, 0, sizeof(turbo_params));
	reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);

	/* Unpack the fields written by xsdfec_set_turbo() */
	turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
			     XSDFEC_TURBO_SCALE_BIT_POS;
	turbo_params.alg = reg_value & 0x1;

	err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
	if (err)
		err = -EFAULT;

	return err;
}
0457
/*
 * Validate and write LDPC code register 0 for code slot @offset: packs the
 * block length N (low half) and information bits K (bits [31:16]).  Both
 * must be multiples of the sub-matrix size @psize and within the hardware
 * limits, with N > K.
 */
static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
			     u32 offset)
{
	u32 wdata;

	if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
	    (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
		dev_dbg(xsdfec->dev, "N value is not in range");
		return -EINVAL;
	}
	n <<= XSDFEC_REG0_N_LSB;

	if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
	    (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
		dev_dbg(xsdfec->dev, "K value is not in range");
		return -EINVAL;
	}
	k = k << XSDFEC_REG0_K_LSB;
	wdata = k | n;

	/* Each code slot's registers are XSDFEC_LDPC_REG_JUMP bytes apart */
	if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}
0491
/*
 * Validate and write LDPC code register 1 for code slot @offset: packs the
 * sub-matrix size @psize, the no-packing flag (bit 10) and NM (bits
 * [19:11]).  Out-of-range no_packing/nm values are only logged; the masks
 * below clamp them before the write.
 */
static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
			     u32 no_packing, u32 nm, u32 offset)
{
	u32 wdata;

	if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
		dev_dbg(xsdfec->dev, "Psize is not in range");
		return -EINVAL;
	}

	if (no_packing != 0 && no_packing != 1)
		dev_dbg(xsdfec->dev, "No-packing bit register invalid");
	no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
		      XSDFEC_REG1_NO_PACKING_MASK);

	if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
		dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
	nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;

	wdata = nm | no_packing | psize;
	if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}
0525
/*
 * Validate and write LDPC code register 2 for code slot @offset: packs
 * nlayers (low bits), NMQC (bits [19:9]), norm type (bit 20), special QC
 * (bit 21), no-final-parity (bit 22) and max schedule (bits [24:23]).
 * Only nlayers is a hard failure; the other fields are logged when out of
 * range and then masked into place.
 */
static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
			     u32 norm_type, u32 special_qc, u32 no_final_parity,
			     u32 max_schedule, u32 offset)
{
	u32 wdata;

	if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
	    nlayers > XSDFEC_REG2_NLAYERS_MAX) {
		dev_dbg(xsdfec->dev, "Nlayers is not in range");
		return -EINVAL;
	}

	if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
		dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
	nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;

	if (norm_type > 1)
		dev_dbg(xsdfec->dev, "Norm type is invalid");
	norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
		     XSDFEC_REG2_NORM_TYPE_MASK);
	if (special_qc > 1)
		dev_dbg(xsdfec->dev, "Special QC in invalid");
	special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
		      XSDFEC_REG2_SPECIAL_QC_MASK);

	if (no_final_parity > 1)
		dev_dbg(xsdfec->dev, "No final parity check invalid");
	no_final_parity =
		((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
		 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
	if (max_schedule &
	    ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
		dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
	max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
			XSDFEC_REG2_MAX_SCHEDULE_MASK);

	wdata = (max_schedule | no_final_parity | special_qc | norm_type |
		 nmqc | nlayers);

	if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}
0578
/*
 * Write LDPC code register 3 for code slot @offset: packs the SC table
 * offset (low byte), LA table offset (bits [15:8]) and QC table offset
 * (bits [31:16]) for this code.
 */
static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
			     u16 qc_off, u32 offset)
{
	u32 wdata;

	wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
		 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
	if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}
0599
0600 static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
0601 u32 *src_ptr, u32 len, const u32 base_addr,
0602 const u32 depth)
0603 {
0604 u32 reg = 0;
0605 int res, i, nr_pages;
0606 u32 n;
0607 u32 *addr = NULL;
0608 struct page *pages[MAX_NUM_PAGES];
0609
0610
0611
0612
0613
0614 if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
0615 len > depth / XSDFEC_REG_WIDTH_JUMP ||
0616 offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
0617 dev_dbg(xsdfec->dev, "Write exceeds SC table length");
0618 return -EINVAL;
0619 }
0620
0621 n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
0622 if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
0623 n += 1;
0624
0625 if (WARN_ON_ONCE(n > INT_MAX))
0626 return -EINVAL;
0627
0628 nr_pages = n;
0629
0630 res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
0631 if (res < nr_pages) {
0632 if (res > 0)
0633 unpin_user_pages(pages, res);
0634
0635 return -EINVAL;
0636 }
0637
0638 for (i = 0; i < nr_pages; i++) {
0639 addr = kmap(pages[i]);
0640 do {
0641 xsdfec_regwrite(xsdfec,
0642 base_addr + ((offset + reg) *
0643 XSDFEC_REG_WIDTH_JUMP),
0644 addr[reg]);
0645 reg++;
0646 } while ((reg < len) &&
0647 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
0648 unpin_user_page(pages[i]);
0649 }
0650 return 0;
0651 }
0652
/*
 * XSDFEC_ADD_LDPC_CODE_PARAMS ioctl: copy an LDPC parameter set from user
 * space and program code registers 0-3 plus the SC/LA/QC tables for the
 * chosen code slot (ldpc->code_id).
 */
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_ldpc_params *ldpc;
	int ret, n;

	ldpc = memdup_user(arg, sizeof(*ldpc));
	if (IS_ERR(ldpc))
		return PTR_ERR(ldpc);

	/* LDPC codes cannot be loaded when the core is built for turbo */
	if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
		ret = -EIO;
		goto err_out;
	}

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED) {
		ret = -EIO;
		goto err_out;
	}

	/* Code registers may be write protected */
	if (xsdfec->config.code_wr_protect) {
		ret = -EIO;
		goto err_out;
	}

	/* Write Reg 0: block length N and information bits K */
	ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 1: sub-matrix size and packing */
	ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 2: layer count, normalisation and scheduling */
	ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
				ldpc->norm_type, ldpc->special_qc,
				ldpc->no_final_parity, ldpc->max_schedule,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 3: table offsets for this code */
	ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
				ldpc->qc_off, ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write the shared scale table: one SC word covers four layers */
	n = ldpc->nlayers / 4;
	if (ldpc->nlayers % 4)
		n++;

	ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
				 XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
				 XSDFEC_SC_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	/* LA/QC offsets are scaled by 4 into word offsets — presumably the
	 * register fields are in 4-word units; confirm against PG256.
	 */
	ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
				 ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
				 XSDFEC_LA_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
				 ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
				 XSDFEC_QC_TABLE_DEPTH);
err_out:
	kfree(ldpc);
	return ret;
}
0728
/*
 * XSDFEC_SET_ORDER ioctl: set the block ordering mode (maintain order or
 * out-of-order).  Rejected while the core is started, since the order
 * register must not change mid-stream.
 */
static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
{
	bool order_invalid;
	enum xsdfec_order order;
	int err;

	err = get_user(order, (enum xsdfec_order __user *)arg);
	if (err)
		return -EFAULT;

	order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
			(order != XSDFEC_OUT_OF_ORDER);
	if (order_invalid)
		return -EINVAL;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);

	xsdfec->config.order = order;

	return 0;
}
0754
0755 static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
0756 {
0757 bool bypass;
0758 int err;
0759
0760 err = get_user(bypass, arg);
0761 if (err)
0762 return -EFAULT;
0763
0764
0765 if (xsdfec->state == XSDFEC_STARTED)
0766 return -EIO;
0767
0768 if (bypass)
0769 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
0770 else
0771 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
0772
0773 xsdfec->config.bypass = bypass;
0774
0775 return 0;
0776 }
0777
0778 static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
0779 {
0780 u32 reg_value;
0781 bool is_active;
0782 int err;
0783
0784 reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
0785
0786 is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
0787 err = put_user(is_active, arg);
0788 if (err)
0789 return -EFAULT;
0790
0791 return err;
0792 }
0793
0794 static u32
0795 xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
0796 {
0797 u32 axis_width_field = 0;
0798
0799 switch (axis_width_cfg) {
0800 case XSDFEC_1x128b:
0801 axis_width_field = 0;
0802 break;
0803 case XSDFEC_2x128b:
0804 axis_width_field = 1;
0805 break;
0806 case XSDFEC_4x128b:
0807 axis_width_field = 2;
0808 break;
0809 }
0810
0811 return axis_width_field;
0812 }
0813
0814 static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
0815 axis_word_inc_cfg)
0816 {
0817 u32 axis_words_field = 0;
0818
0819 if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
0820 axis_word_inc_cfg == XSDFEC_IN_BLOCK)
0821 axis_words_field = 0;
0822 else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
0823 axis_words_field = 1;
0824
0825 return axis_words_field;
0826 }
0827
0828 static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
0829 {
0830 u32 reg_value;
0831 u32 dout_words_field;
0832 u32 dout_width_field;
0833 u32 din_words_field;
0834 u32 din_width_field;
0835 struct xsdfec_config *config = &xsdfec->config;
0836
0837
0838 dout_words_field =
0839 xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
0840 dout_width_field =
0841 xsdfec_translate_axis_width_cfg_val(config->dout_width);
0842 din_words_field =
0843 xsdfec_translate_axis_words_cfg_val(config->din_word_include);
0844 din_width_field =
0845 xsdfec_translate_axis_width_cfg_val(config->din_width);
0846
0847 reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
0848 reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
0849 reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
0850 reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
0851
0852 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
0853
0854 return 0;
0855 }
0856
/* open(): no per-open state; everything hangs off the miscdevice. */
static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
{
	return 0;
}
0861
/* release(): nothing to tear down, mirrors xsdfec_dev_open(). */
static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
{
	return 0;
}
0866
/*
 * XSDFEC_START_DEV ioctl: verify the hardware's FEC code matches the
 * driver's configuration, then enable both the input and output AXIS
 * streams and mark the device started.
 */
static int xsdfec_start(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
	regread &= 0x1;
	if (regread != xsdfec->config.code) {
		dev_dbg(xsdfec->dev,
			"%s SDFEC HW code does not match driver code, reg %d, code %d",
			__func__, regread, xsdfec->config.code);
		return -EINVAL;
	}

	/* Set AXIS enable: both IN and OUT streams */
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
			XSDFEC_AXIS_ENABLE_MASK);
	/* Done */
	xsdfec->state = XSDFEC_STARTED;
	return 0;
}
0887
/*
 * XSDFEC_STOP_DEV ioctl: disable only the input AXIS streams (output is
 * left enabled so in-flight blocks can drain) and mark the device stopped.
 */
static int xsdfec_stop(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	if (xsdfec->state != XSDFEC_STARTED)
		dev_dbg(xsdfec->dev, "Device not started correctly");
	/* Disable AXIS_ENABLE Input interfaces only */
	regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
	/* Stop */
	xsdfec->state = XSDFEC_STOPPED;
	return 0;
}
0902
/*
 * XSDFEC_CLEAR_STATS ioctl: zero all error counters under the error-data
 * lock (flags stored in xsdfec->flags by driver-wide convention).
 */
static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
{
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	xsdfec->isr_err_count = 0;
	xsdfec->uecc_count = 0;
	xsdfec->cecc_count = 0;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return 0;
}
0913
/*
 * XSDFEC_GET_STATS ioctl: snapshot the error counters under the lock and
 * copy them to user space.  Reading also clears stats_updated (consumed
 * by poll()).
 */
static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;
	struct xsdfec_stats user_stats;

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	user_stats.isr_err_count = xsdfec->isr_err_count;
	user_stats.cecc_count = xsdfec->cecc_count;
	user_stats.uecc_count = xsdfec->uecc_count;
	xsdfec->stats_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	err = copy_to_user(arg, &user_stats, sizeof(user_stats));
	if (err)
		err = -EFAULT;

	return err;
}
0932
/*
 * XSDFEC_SET_DEFAULT_CONFIG ioctl: re-program the FEC code and AXIS
 * stream registers from the cached config, then refresh the cache from
 * hardware.  Used to recover after a needs-reset condition.
 */
static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
	/* Ensure registers are aligned with core configuration */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
	xsdfec_cfg_axi_streams(xsdfec);
	update_config_from_hw(xsdfec);

	return 0;
}
0942
/*
 * Main ioctl dispatcher.  While the device is in the NEEDS_RESET state,
 * only the status/stats/clear/default-config commands are permitted; all
 * others return -EPERM until the device has been reset.
 */
static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
			     unsigned long data)
{
	struct xsdfec_dev *xsdfec;
	void __user *arg = (void __user *)data;
	int rval;

	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);

	/* In failed state allow only reset and get status IOCTLs */
	if (xsdfec->state == XSDFEC_NEEDS_RESET &&
	    (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
	     cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
		return -EPERM;
	}

	switch (cmd) {
	case XSDFEC_START_DEV:
		rval = xsdfec_start(xsdfec);
		break;
	case XSDFEC_STOP_DEV:
		rval = xsdfec_stop(xsdfec);
		break;
	case XSDFEC_CLEAR_STATS:
		rval = xsdfec_clear_stats(xsdfec);
		break;
	case XSDFEC_GET_STATS:
		rval = xsdfec_get_stats(xsdfec, arg);
		break;
	case XSDFEC_GET_STATUS:
		rval = xsdfec_get_status(xsdfec, arg);
		break;
	case XSDFEC_GET_CONFIG:
		rval = xsdfec_get_config(xsdfec, arg);
		break;
	case XSDFEC_SET_DEFAULT_CONFIG:
		rval = xsdfec_set_default_config(xsdfec);
		break;
	case XSDFEC_SET_IRQ:
		rval = xsdfec_set_irq(xsdfec, arg);
		break;
	case XSDFEC_SET_TURBO:
		rval = xsdfec_set_turbo(xsdfec, arg);
		break;
	case XSDFEC_GET_TURBO:
		rval = xsdfec_get_turbo(xsdfec, arg);
		break;
	case XSDFEC_ADD_LDPC_CODE_PARAMS:
		rval = xsdfec_add_ldpc(xsdfec, arg);
		break;
	case XSDFEC_SET_ORDER:
		rval = xsdfec_set_order(xsdfec, arg);
		break;
	case XSDFEC_SET_BYPASS:
		rval = xsdfec_set_bypass(xsdfec, arg);
		break;
	case XSDFEC_IS_ACTIVE:
		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
		break;
	default:
		/* Should not get here */
		rval = -ENOTTY;
		break;
	}
	return rval;
}
1008
/*
 * poll(): report EPOLLIN|EPOLLPRI when the device state changed and
 * EPOLLIN|EPOLLRDNORM when the error statistics changed.  Both flags are
 * set by the IRQ thread and cleared by the corresponding GET ioctls.
 */
static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;
	struct xsdfec_dev *xsdfec;

	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);

	poll_wait(file, &xsdfec->waitq, wait);

	/* XSDFEC ISR detected an error */
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	if (xsdfec->state_updated)
		mask |= EPOLLIN | EPOLLPRI;

	if (xsdfec->stats_updated)
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return mask;
}
1029
/* File operations for the SD-FEC misc device character interface. */
static const struct file_operations xsdfec_fops = {
	.owner = THIS_MODULE,
	.open = xsdfec_dev_open,
	.release = xsdfec_dev_release,
	.unlocked_ioctl = xsdfec_dev_ioctl,
	.poll = xsdfec_poll,
	.compat_ioctl = compat_ptr_ioctl,
};
1038
/*
 * Parse the device-tree properties (FEC code, DIN/DOUT widths and
 * word-include modes), cache them in xsdfec->config and program the FEC
 * code and AXIS width registers to match.
 */
static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
{
	struct device *dev = xsdfec->dev;
	struct device_node *node = dev->of_node;
	int rval;
	const char *fec_code;
	u32 din_width;
	u32 din_word_include;
	u32 dout_width;
	u32 dout_word_include;

	rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
	if (rval < 0)
		return rval;

	if (!strcasecmp(fec_code, "ldpc"))
		xsdfec->config.code = XSDFEC_LDPC_CODE;
	else if (!strcasecmp(fec_code, "turbo"))
		xsdfec->config.code = XSDFEC_TURBO_CODE;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
				    &din_word_include);
	if (rval < 0)
		return rval;

	if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.din_word_include = din_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
	if (rval < 0)
		return rval;

	switch (din_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.din_width = din_width;
		break;
	default:
		return -EINVAL;
	}

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
				    &dout_word_include);
	if (rval < 0)
		return rval;

	if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.dout_word_include = dout_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
	if (rval < 0)
		return rval;

	switch (dout_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.dout_width = dout_width;
		break;
	default:
		return -EINVAL;
	}

	/* Write LDPC to CODE Register */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);

	xsdfec_cfg_axi_streams(xsdfec);

	return 0;
}
1118
/*
 * Threaded IRQ handler: masks interrupts, reads and acknowledges both the
 * general and ECC interrupt status, updates the error counters and the
 * driver state, wakes pollers and re-enables interrupts.
 */
static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
	struct xsdfec_dev *xsdfec = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	u32 ecc_err;
	u32 isr_err;
	u32 uecc_count;
	u32 cecc_count;
	u32 isr_err_count;
	u32 aecc_count;
	u32 tmp;

	WARN_ON(xsdfec->irq != irq);

	/* Mask Interrupts while we process this one */
	xsdfec_isr_enable(xsdfec, false);
	xsdfec_ecc_isr_enable(xsdfec, false);
	/* Read ISR and acknowledge (write-to-clear) what we saw */
	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
	/* Clear the interrupts */
	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);

	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
	/* Multi-bit (uncorrectable) errors: one per set MBE bit */
	uecc_count = hweight32(tmp);
	/* All ECC bits set, SBE and MBE together */
	aecc_count = hweight32(ecc_err);
	/*
	 * Correctable count = total bits minus 2 per MBE event — this
	 * presumes each MBE also raises its corresponding SBE bit; see the
	 * SBE/MBE mask layout above.
	 */
	cecc_count = aecc_count - 2 * uecc_count;
	/* General errors: one per set ISR bit */
	isr_err_count = hweight32(isr_err);
	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
		uecc_count, aecc_count, cecc_count, isr_err_count);
	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
		xsdfec->cecc_count, xsdfec->isr_err_count);

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	/* Add new errors to a 2-bits counter */
	if (uecc_count)
		xsdfec->uecc_count += uecc_count;

	if (cecc_count)
		xsdfec->cecc_count += cecc_count;

	if (isr_err_count)
		xsdfec->isr_err_count += isr_err_count;

	/* Update state/stats flag */
	if (uecc_count) {
		/* Core-ECC MBE needs a reset; PL-init MBE needs a PL reload */
		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_NEEDS_RESET;
		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_PL_RECONFIGURE;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	if (cecc_count)
		xsdfec->stats_updated = true;

	if (isr_err_count) {
		xsdfec->state = XSDFEC_NEEDS_RESET;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
		xsdfec->stats_updated);

	/* Enable another polling */
	if (xsdfec->state_updated || xsdfec->stats_updated)
		wake_up_interruptible(&xsdfec->waitq);
	else
		ret = IRQ_NONE;

	/* Unmask Interrupts */
	xsdfec_isr_enable(xsdfec, true);
	xsdfec_ecc_isr_enable(xsdfec, true);

	return ret;
}
1203
1204 static int xsdfec_clk_init(struct platform_device *pdev,
1205 struct xsdfec_clks *clks)
1206 {
1207 int err;
1208
1209 clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
1210 if (IS_ERR(clks->core_clk)) {
1211 dev_err(&pdev->dev, "failed to get core_clk");
1212 return PTR_ERR(clks->core_clk);
1213 }
1214
1215 clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1216 if (IS_ERR(clks->axi_clk)) {
1217 dev_err(&pdev->dev, "failed to get axi_clk");
1218 return PTR_ERR(clks->axi_clk);
1219 }
1220
1221 clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
1222 if (IS_ERR(clks->din_words_clk)) {
1223 if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
1224 err = PTR_ERR(clks->din_words_clk);
1225 return err;
1226 }
1227 clks->din_words_clk = NULL;
1228 }
1229
1230 clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
1231 if (IS_ERR(clks->din_clk)) {
1232 if (PTR_ERR(clks->din_clk) != -ENOENT) {
1233 err = PTR_ERR(clks->din_clk);
1234 return err;
1235 }
1236 clks->din_clk = NULL;
1237 }
1238
1239 clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
1240 if (IS_ERR(clks->dout_clk)) {
1241 if (PTR_ERR(clks->dout_clk) != -ENOENT) {
1242 err = PTR_ERR(clks->dout_clk);
1243 return err;
1244 }
1245 clks->dout_clk = NULL;
1246 }
1247
1248 clks->dout_words_clk =
1249 devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
1250 if (IS_ERR(clks->dout_words_clk)) {
1251 if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
1252 err = PTR_ERR(clks->dout_words_clk);
1253 return err;
1254 }
1255 clks->dout_words_clk = NULL;
1256 }
1257
1258 clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
1259 if (IS_ERR(clks->ctrl_clk)) {
1260 if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
1261 err = PTR_ERR(clks->ctrl_clk);
1262 return err;
1263 }
1264 clks->ctrl_clk = NULL;
1265 }
1266
1267 clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
1268 if (IS_ERR(clks->status_clk)) {
1269 if (PTR_ERR(clks->status_clk) != -ENOENT) {
1270 err = PTR_ERR(clks->status_clk);
1271 return err;
1272 }
1273 clks->status_clk = NULL;
1274 }
1275
1276 err = clk_prepare_enable(clks->core_clk);
1277 if (err) {
1278 dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
1279 return err;
1280 }
1281
1282 err = clk_prepare_enable(clks->axi_clk);
1283 if (err) {
1284 dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
1285 goto err_disable_core_clk;
1286 }
1287
1288 err = clk_prepare_enable(clks->din_clk);
1289 if (err) {
1290 dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
1291 goto err_disable_axi_clk;
1292 }
1293
1294 err = clk_prepare_enable(clks->din_words_clk);
1295 if (err) {
1296 dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
1297 goto err_disable_din_clk;
1298 }
1299
1300 err = clk_prepare_enable(clks->dout_clk);
1301 if (err) {
1302 dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
1303 goto err_disable_din_words_clk;
1304 }
1305
1306 err = clk_prepare_enable(clks->dout_words_clk);
1307 if (err) {
1308 dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
1309 err);
1310 goto err_disable_dout_clk;
1311 }
1312
1313 err = clk_prepare_enable(clks->ctrl_clk);
1314 if (err) {
1315 dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
1316 goto err_disable_dout_words_clk;
1317 }
1318
1319 err = clk_prepare_enable(clks->status_clk);
1320 if (err) {
1321 dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
1322 goto err_disable_ctrl_clk;
1323 }
1324
1325 return err;
1326
1327 err_disable_ctrl_clk:
1328 clk_disable_unprepare(clks->ctrl_clk);
1329 err_disable_dout_words_clk:
1330 clk_disable_unprepare(clks->dout_words_clk);
1331 err_disable_dout_clk:
1332 clk_disable_unprepare(clks->dout_clk);
1333 err_disable_din_words_clk:
1334 clk_disable_unprepare(clks->din_words_clk);
1335 err_disable_din_clk:
1336 clk_disable_unprepare(clks->din_clk);
1337 err_disable_axi_clk:
1338 clk_disable_unprepare(clks->axi_clk);
1339 err_disable_core_clk:
1340 clk_disable_unprepare(clks->core_clk);
1341
1342 return err;
1343 }
1344
1345 static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
1346 {
1347 clk_disable_unprepare(clks->status_clk);
1348 clk_disable_unprepare(clks->ctrl_clk);
1349 clk_disable_unprepare(clks->dout_words_clk);
1350 clk_disable_unprepare(clks->dout_clk);
1351 clk_disable_unprepare(clks->din_words_clk);
1352 clk_disable_unprepare(clks->din_clk);
1353 clk_disable_unprepare(clks->core_clk);
1354 clk_disable_unprepare(clks->axi_clk);
1355 }
1356
1357 static int xsdfec_probe(struct platform_device *pdev)
1358 {
1359 struct xsdfec_dev *xsdfec;
1360 struct device *dev;
1361 struct resource *res;
1362 int err;
1363 bool irq_enabled = true;
1364
1365 xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
1366 if (!xsdfec)
1367 return -ENOMEM;
1368
1369 xsdfec->dev = &pdev->dev;
1370 spin_lock_init(&xsdfec->error_data_lock);
1371
1372 err = xsdfec_clk_init(pdev, &xsdfec->clks);
1373 if (err)
1374 return err;
1375
1376 dev = xsdfec->dev;
1377 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1378 xsdfec->regs = devm_ioremap_resource(dev, res);
1379 if (IS_ERR(xsdfec->regs)) {
1380 err = PTR_ERR(xsdfec->regs);
1381 goto err_xsdfec_dev;
1382 }
1383
1384 xsdfec->irq = platform_get_irq(pdev, 0);
1385 if (xsdfec->irq < 0) {
1386 dev_dbg(dev, "platform_get_irq failed");
1387 irq_enabled = false;
1388 }
1389
1390 err = xsdfec_parse_of(xsdfec);
1391 if (err < 0)
1392 goto err_xsdfec_dev;
1393
1394 update_config_from_hw(xsdfec);
1395
1396
1397 platform_set_drvdata(pdev, xsdfec);
1398
1399 if (irq_enabled) {
1400 init_waitqueue_head(&xsdfec->waitq);
1401
1402 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
1403 xsdfec_irq_thread, IRQF_ONESHOT,
1404 "xilinx-sdfec16", xsdfec);
1405 if (err < 0) {
1406 dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
1407 goto err_xsdfec_dev;
1408 }
1409 }
1410
1411 err = ida_alloc(&dev_nrs, GFP_KERNEL);
1412 if (err < 0)
1413 goto err_xsdfec_dev;
1414 xsdfec->dev_id = err;
1415
1416 snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
1417 xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
1418 xsdfec->miscdev.name = xsdfec->dev_name;
1419 xsdfec->miscdev.fops = &xsdfec_fops;
1420 xsdfec->miscdev.parent = dev;
1421 err = misc_register(&xsdfec->miscdev);
1422 if (err) {
1423 dev_err(dev, "error:%d. Unable to register device", err);
1424 goto err_xsdfec_ida;
1425 }
1426 return 0;
1427
1428 err_xsdfec_ida:
1429 ida_free(&dev_nrs, xsdfec->dev_id);
1430 err_xsdfec_dev:
1431 xsdfec_disable_all_clks(&xsdfec->clks);
1432 return err;
1433 }
1434
1435 static int xsdfec_remove(struct platform_device *pdev)
1436 {
1437 struct xsdfec_dev *xsdfec;
1438
1439 xsdfec = platform_get_drvdata(pdev);
1440 misc_deregister(&xsdfec->miscdev);
1441 ida_free(&dev_nrs, xsdfec->dev_id);
1442 xsdfec_disable_all_clks(&xsdfec->clks);
1443 return 0;
1444 }
1445
/* Device-tree compatible strings this driver binds to */
static const struct of_device_id xsdfec_of_match[] = {
	{
		.compatible = "xlnx,sd-fec-1.1",
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);
1453
/* Platform driver glue: matched via the OF table above */
static struct platform_driver xsdfec_driver = {
	.driver = {
		.name = "xilinx-sdfec",
		.of_match_table = xsdfec_of_match,
	},
	.probe = xsdfec_probe,
	.remove = xsdfec_remove,
};

module_platform_driver(xsdfec_driver);

MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");