#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"

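/* Soft-reset the Octeon: enable BIST, leave a marker value in
 * SLI_SCRATCH1, trigger the CIU soft reset, and then verify that the
 * reset cleared the scratch register.
 */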
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

	/* Wait for 100ms as Octeon resets. */
	mdelay(100);

	/* The reset clears SLI_SCRATCH1; if the marker survived, the
	 * reset did not take effect.
	 */
	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}

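/* Check for PCIe link errors reported in the Device Control register
 * and turn on the PCIe error-reporting enable bits.
 */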
void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
	u32 val;

	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
	if (val & 0x000c0000) {
		dev_err(&oct->pci_dev->dev,
			"PCI-E Link error detected: 0x%08x\n",
			val & 0x000c0000);
	}

	/* Enable correctable, non-fatal, fatal and unsupported-request
	 * error reporting.
	 */
	val |= 0xf;

	dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting...\n");
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}

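/* Program the PCIe Max Payload Size in the PCIe Device Control register
 * and mirror it into DPI_SLI_PRTx_CFG; when PCIE_MPS_DEFAULT is passed,
 * the value already in config space is used.
 */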
void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
			       enum octeon_pcie_mps mps)
{
	u32 val;
	u64 r64;

	/* Read config register for MPS. */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mps == PCIE_MPS_DEFAULT) {
		mps = ((val & (0x7 << 5)) >> 5);
	} else {
		val &= ~(0x7 << 5);	/* Turn off any MPS bits */
		val |= (mps << 5);	/* Set MPS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MPS in DPI_SLI_PRTX_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= (mps << 4);
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

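/* Program the PCIe Max Read Request Size in the PCIe Device Control
 * register and mirror it into SLI_S2M_PORTx_CTL and DPI_SLI_PRTx_CFG.
 */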
void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
				enum octeon_pcie_mrrs mrrs)
{
	u32 val;
	u64 r64;

	/* Read config register for MRRS. */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mrrs == PCIE_MRRS_DEFAULT) {
		mrrs = ((val & (0x7 << 12)) >> 12);
	} else {
		val &= ~(0x7 << 12);	/* Turn off any MRRS bits */
		val |= (mrrs << 12);	/* Set MRRS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MRRS in SLI_S2M_PORTX_CTL to the same value. */
	r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
	r64 |= mrrs;
	octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

	/* Set MRRS in DPI_SLI_PRTX_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= mrrs;
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

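/* Return the coprocessor (SLI) clock rate in MHz. */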
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
	/* Bits 29:24 of MIO_RST_BOOT hold the reference-clock multiplier.
	 * The reference clock runs at 50 MHz, so the product is the
	 * coprocessor clock in MHz.
	 */
	return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}

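/* Convert an output-queue interrupt time (in microseconds) into the
 * equivalent number of OQ ticks, where one tick is 1024 coprocessor
 * clock cycles.
 */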
u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
			    u32 time_intr_in_us)
{
	/* This gives the coprocessor clock cycles per microsecond. */
	u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

	/* (clock cycles per us) / (oq ticks per us) would be fractional,
	 * so scale up to milliseconds first to keep the integer
	 * arithmetic accurate.
	 */

	/* This gives the clock cycles per millisecond. */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 clock cycles each) per millisecond. */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next two steps give the oq
	 * ticks corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return oqticks_per_us;
}

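/* Global input (instruction) queue configuration common to all rings. */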
void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
	/* Select Round-Robin Arb, ES, RO, NS for Input Queues. */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
			 CN6XXX_INPUT_CTL_MASK);

	/* Instruction Read Size - max instructions per PCIe read. */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
			   0xFFFFFFFFFFFFFFFFULL);

	/* Select the PCIe port for all input rings. */
	octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
			   (oct->pcie_port * 0x5555555555555555ULL));
}

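/* CN66XX-specific packet control: ring grouping and SLI backpressure. */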
static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	u64 pktctl;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
		/* Disable RING_EN if only up to 4 rings are used. */
		pktctl &= ~(1 << 4);
	else
		pktctl |= (1 << 4);

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;

	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

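/* Global output (droq) configuration: PCIe port selection, backpressure
 * watermark, swap/ordering modes, and interrupt thresholds.
 */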
void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
	u32 time_threshold;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Select the PCIe port for all output queues. */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
			   (oct->pcie_port * 0x5555555555555555ULL));

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
	} else {
		/* Set output queue watermark to 0 to disable backpressure. */
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
	}

	/* Select packet count instead of bytes for SLI_PKTi_CNTS[CNT]. */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

	/* Select the ES, RO, NS setting from register for the Output
	 * Queue packet address.
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

	/* No Relaxed Ordering, no Snoop for the Output Queue
	 * scatter-list.
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

	/* Endian-specific: enable 64-bit swap only on big-endian hosts. */
#ifdef __BIG_ENDIAN_BITFIELD
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
			   0x5555555555555555ULL);
#else
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

	/* No Relaxed Ordering, no Snoop, 64-bit swap for Output Queue
	 * data.
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
			   0x5555555555555555ULL);

	/* Set up the interrupt packet and time thresholds. */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
	time_threshold =
		lio_cn6xxx_get_oq_ticks(oct, (u32)
					CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}

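/* One-time device register setup: PCIe parameters and error reporting,
 * followed by the global input/output queue configuration.
 */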
static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn66xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* The default error timeout value should be 0x200000 to avoid
	 * host hangs when an invalid register is read.
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

	return 0;
}

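/* Per-ring input queue setup: program the ring's base address and size,
 * and record the mapped doorbell and instruction-count registers.
 */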
void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

	/* Write the start of the input queue's ring and its size. */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addresses
	 * for this queue.
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr
			   + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in the flush_iq
	 * calculation).
	 */
	iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}

static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	lio_cn6xxx_setup_iq_regs(oct, iq_no);

	/* Backpressure for this queue - WMARK set to all Fs, which
	 * effectively disables the backpressure mechanism.
	 */
	octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
			   (0xFFFFFFFFULL << 32));
}

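/* Per-ring output queue setup: descriptor ring address/size, buffer
 * size, register mappings, and per-queue interrupt enables.
 */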
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 intr;
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 droq->buffer_size);

	/* Get the mapped address of the pkts_sent and pkts_credit regs. */
	droq->pkts_sent_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

	/* Enable this output queue to generate Packet Timer interrupts. */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

	/* Enable this output queue to generate Packet Count interrupts. */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}

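/* Turn on the instruction-size, input-enable and output-enable bits for
 * all queues in the device's queue masks.
 */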
int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
	u32 mask;

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
	mask |= oct->io_qmask.iq64B;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask |= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask |= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	return 0;
}

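/* Disable all input and output queues: clear the enable bits, wait for
 * the hardware to report the rings in reset, then clear doorbells,
 * credits, and any pending packet interrupts.
 */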
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
	int i;
	u32 mask, loop = HZ;
	u32 d32;

	/* Reset the Enable bits for Input Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask ^= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	/* Wait until hardware indicates that the queues are out of reset. */
	mask = (u32)oct->io_qmask.iq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Input queue. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
	}

	/* Reset the Enable bits for Output Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask ^= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	/* Wait until hardware indicates that the queues are out of reset. */
	loop = HZ;
	mask = (u32)oct->io_qmask.oq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the credit count register for each Output queue. */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

		/* Writing back the count that was read clears the
		 * pkts_sent register.
		 */
		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
	}

	/* Clear any pending packet-count and packet-time interrupts. */
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}

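/* Set up or invalidate a BAR1 index register mapping for the given core
 * address. When valid is 0, only the enable bit is cleared.
 */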
void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
			  u64 core_addr,
			  u32 idx,
			  int valid)
{
	u64 bar1;

	if (valid == 0) {
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		/* Clear bit 0 to mark this index entry as invalid. */
		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
			       CN6XXX_BAR1_REG(idx, oct->pcie_port));
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		return;
	}

	/* Write bits <41:22> of the core address into the BAR1 index
	 * register along with the enable mask; the read back flushes
	 * the write.
	 */
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
		       CN6XXX_BAR1_REG(idx, oct->pcie_port));

	bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
			       u32 idx,
			       u32 mask)
{
	lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
	return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

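/* Compute the host-side read index for an instruction queue from the
 * hardware instruction counter, accounting for 32-bit counter wrap
 * relative to the value captured at queue init.
 */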
u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 new_idx = readl(iq->inst_cnt_reg);

	/* The instruction count register is a free-running 32-bit
	 * counter. Subtract the value noted at init time
	 * (reset_instr_cnt), handling wrap-around.
	 */
	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;
	else
		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;

	/* Modulo of the count with the IQ size gives the read index. */
	new_idx %= iq->max_count;

	return new_idx;
}

void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
				 u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

	/* Enable interrupts. */
	writeq(mask, cn6xxx->intr_enb_reg64);
}

void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
				  u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Disable interrupts. */
	writeq(0, cn6xxx->intr_enb_reg64);
}

static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
	/* The SLI_MAC_NUMBER register reports which PCIe (QLM) port
	 * this device is connected through.
	 */
	oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

	dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}

static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
	dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
		CVM_CAST64(intr64));
}

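/* Handle DROQ (output queue) interrupts: find the queues whose count or
 * time interrupt fired, note which of them have packets pending, mask
 * further interrupts for queues serviced in poll mode, and acknowledge
 * the interrupt cause bits.
 */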
static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
	struct octeon_droq *droq;
	int oq_no;
	u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
	u32 droq_cnt_enb, droq_cnt_mask;

	droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	droq_mask = droq_cnt_mask & droq_cnt_enb;

	droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	droq_mask |= (droq_time_mask & droq_int_enb);

	droq_mask &= oct->io_qmask.oq;

	oct->droq_intr = 0;

	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
		if (!(droq_mask & BIT_ULL(oq_no)))
			continue;

		droq = oct->droq[oq_no];
		pkt_count = octeon_droq_check_hw_for_pkts(droq);
		if (pkt_count) {
			oct->droq_intr |= BIT_ULL(oq_no);
			if (droq->ops.poll_mode) {
				u32 value;
				u32 reg;
				struct octeon_cn6xxx *cn6xxx =
					(struct octeon_cn6xxx *)oct->chip;

				/* Disable this queue's time and count
				 * interrupts while it is serviced in
				 * poll mode.
				 */
				spin_lock(&cn6xxx->lock_for_droq_int_enb_reg);
				reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);
				reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);
				spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
			}
		}
	}

	droq_time_mask &= oct->io_qmask.oq;
	droq_cnt_mask &= oct->io_qmask.oq;

	/* Acknowledge the interrupt cause bits that were set. */
	if (droq_time_mask)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

	if (droq_cnt_mask)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

	return 0;
}

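/* Top-level interrupt handler: read the interrupt summary register,
 * dispatch the error, packet-data and DMA-force causes, then clear them.
 */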
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 intr64;

	intr64 = readq(cn6xxx->intr_sum_reg64);

	/* If our device has interrupted, then proceed. Also check for
	 * all Fs, which happens when the interrupt was triggered on an
	 * error and the PCI read fails.
	 */
	if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
		return IRQ_NONE;

	oct->int_status = 0;

	if (intr64 & CN6XXX_INTR_ERR)
		lio_cn6xxx_process_pcie_error_intr(oct, intr64);

	if (intr64 & CN6XXX_INTR_PKT_DATA) {
		lio_cn6xxx_process_droq_intr_regs(oct);
		oct->int_status |= OCT_DEV_INTR_PKT_DATA;
	}

	if (intr64 & CN6XXX_INTR_DMA0_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

	if (intr64 & CN6XXX_INTR_DMA1_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts. */
	writeq(intr64, cn6xxx->intr_sum_reg64);

	return IRQ_HANDLED;
}

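/* Record the mapped addresses of the windowed read/write registers and
 * the interrupt summary/enable registers from BAR0.
 */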
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
				  void *chip,
				  struct octeon_reg_list *reg_list)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

	reg_list->pci_win_wr_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
	reg_list->pci_win_wr_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
	reg_list->pci_win_wr_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

	reg_list->pci_win_rd_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
	reg_list->pci_win_rd_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
	reg_list->pci_win_rd_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

	reg_list->pci_win_wr_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
	reg_list->pci_win_wr_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
	reg_list->pci_win_wr_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

	reg_list->pci_win_rd_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
	reg_list->pci_win_rd_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
	reg_list->pci_win_rd_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

	lio_cn6xxx_get_pcie_qlmport(oct);

	cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
	cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
	cn6xxx->intr_enb_reg64 =
		bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}

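/* Chip-specific setup for CN66XX devices: map the BARs, install the
 * CN66XX function pointers, and fetch the device configuration.
 */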
int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	cn6xxx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, LIO_210SV);
	if (!cn6xxx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	return 0;
}

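/* Sanity-check a CN6XXX configuration: queue counts within chip limits,
 * a valid instruction size, and non-zero OQ refill and interrupt-time
 * settings.
 */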
int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
				    struct octeon_config *conf6xxx)
{
	if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_INPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_OQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_OUTPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
	    CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
			__func__);
		return 1;
	}

	if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
			__func__);
		return 1;
	}

	if (!CFG_GET_OQ_INTR_TIME(conf6xxx)) {
		dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
			__func__);
		return 1;
	}

	return 0;
}