0001
#include <linux/delay.h>
#include <linux/string.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_hal.h"
0007
0008 #define PLL_REF_CLK 50
0009 #define MAX_CSR_RETRIES 10
0010
0011
0012
0013
0014
0015 static void emu_enable_cores(struct nitrox_device *ndev)
0016 {
0017 union emu_se_enable emu_se;
0018 union emu_ae_enable emu_ae;
0019 int i;
0020
0021
0022 emu_ae.value = 0;
0023 emu_ae.s.enable = 0xfffff;
0024
0025
0026 emu_se.value = 0;
0027 emu_se.s.enable = 0xffff;
0028
0029
0030 for (i = 0; i < NR_CLUSTERS; i++) {
0031 nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value);
0032 nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value);
0033 }
0034 }
0035
0036
0037
0038
0039
0040 void nitrox_config_emu_unit(struct nitrox_device *ndev)
0041 {
0042 union emu_wd_int_ena_w1s emu_wd_int;
0043 union emu_ge_int_ena_w1s emu_ge_int;
0044 u64 offset;
0045 int i;
0046
0047
0048 emu_enable_cores(ndev);
0049
0050
0051 emu_ge_int.value = 0;
0052 emu_ge_int.s.se_ge = 0xffff;
0053 emu_ge_int.s.ae_ge = 0xfffff;
0054 emu_wd_int.value = 0;
0055 emu_wd_int.s.se_wd = 1;
0056
0057 for (i = 0; i < NR_CLUSTERS; i++) {
0058 offset = EMU_WD_INT_ENA_W1SX(i);
0059 nitrox_write_csr(ndev, offset, emu_wd_int.value);
0060 offset = EMU_GE_INT_ENA_W1SX(i);
0061 nitrox_write_csr(ndev, offset, emu_ge_int.value);
0062 }
0063 }
0064
/**
 * reset_pkt_input_ring - disable a packet input ring and clear its counters
 * @ndev: NITROX device
 * @ring: packet input ring number
 */
static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	union nps_pkt_in_done_cnts pkt_in_cnts;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* step 1: clear the ring enable bit */
	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);

	/* step 2: bounded poll until the hardware reports the ring disabled */
	usleep_range(100, 150);
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
		if (!pkt_in_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);

	/* step 3: write the done counts back to themselves
	 * (NOTE(review): presumably write-1-to-clear semantics — confirm
	 * against the CSR specification)
	 */
	offset = NPS_PKT_IN_DONE_CNTSX(ring);
	pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
	usleep_range(50, 100);
}
0093
0094 void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
0095 {
0096 union nps_pkt_in_instr_ctl pkt_in_ctl;
0097 int max_retries = MAX_CSR_RETRIES;
0098 u64 offset;
0099
0100
0101 offset = NPS_PKT_IN_INSTR_CTLX(ring);
0102 pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
0103 pkt_in_ctl.s.is64b = 1;
0104 pkt_in_ctl.s.enb = 1;
0105 nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
0106
0107
0108 do {
0109 pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
0110 if (pkt_in_ctl.s.enb)
0111 break;
0112 udelay(50);
0113 } while (max_retries--);
0114 }
0115
0116
0117
0118
0119
/**
 * nitrox_config_pkt_input_rings - configure all packet input rings
 * @ndev: NITROX device
 *
 * For each queue: reset the ring, program its DMA base address and
 * size, set the interrupt levels, write the doorbell field, then
 * re-enable the ring.
 */
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
		union nps_pkt_in_instr_rsize pkt_in_rsize;
		union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
		u64 offset;

		/* quiesce the ring before reprogramming it */
		reset_pkt_input_ring(ndev, i);

		/* ring base address: DMA address of the command queue */
		offset = NPS_PKT_IN_INSTR_BADDRX(i);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* ring size in entries */
		offset = NPS_PKT_IN_INSTR_RSIZEX(i);
		pkt_in_rsize.value = 0;
		pkt_in_rsize.s.rsize = ndev->qlen;
		nitrox_write_csr(ndev, offset, pkt_in_rsize.value);

		/* interrupt levels set to all-ones
		 * (NOTE(review): presumably maxing the threshold so no
		 * level interrupt fires — confirm)
		 */
		offset = NPS_PKT_IN_INT_LEVELSX(i);
		nitrox_write_csr(ndev, offset, 0xffffffff);

		/* write all-ones to the doorbell count
		 * (NOTE(review): presumably clears the doorbell — confirm)
		 */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		pkt_in_dbell.value = 0;
		pkt_in_dbell.s.dbell = 0xffffffff;
		nitrox_write_csr(ndev, offset, pkt_in_dbell.value);

		/* bring the ring back up */
		enable_pkt_input_ring(ndev, i);
	}
}
0160
/**
 * reset_pkt_solicit_port - disable a packet solicit port and clear counters
 * @ndev: NITROX device
 * @port: packet solicit port number
 */
static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* clear the port enable bit */
	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_slc_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* bounded poll until the hardware reports the port disabled */
	usleep_range(100, 150);

	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
		if (!pkt_slc_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);

	/* write the counts back to themselves
	 * (NOTE(review): presumably write-1-to-clear — confirm against
	 * the CSR specification)
	 */
	offset = NPS_PKT_SLC_CNTSX(port);
	pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
	usleep_range(50, 100);
}
0190
/**
 * enable_pkt_solicit_port - enable a packet solicit port
 * @ndev: NITROX device
 * @port: packet solicit port number
 */
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = 0;
	/* enable the port */
	pkt_slc_ctl.s.enb = 1;
	/* set the "z" control bit
	 * (NOTE(review): semantics not visible here — confirm against
	 * the CSR specification)
	 */
	pkt_slc_ctl.s.z = 1;
	/* set the "rh" control bit
	 * (NOTE(review): semantics not visible here — confirm against
	 * the CSR specification)
	 */
	pkt_slc_ctl.s.rh = 1;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* bounded poll until the enable bit reads back set */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
		if (pkt_slc_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);
}
0217
0218 static void config_pkt_solicit_port(struct nitrox_device *ndev, int port)
0219 {
0220 union nps_pkt_slc_int_levels pkt_slc_int;
0221 u64 offset;
0222
0223 reset_pkt_solicit_port(ndev, port);
0224
0225
0226 offset = NPS_PKT_SLC_INT_LEVELSX(port);
0227 pkt_slc_int.value = 0;
0228
0229 pkt_slc_int.s.timet = 0x3fffff;
0230 nitrox_write_csr(ndev, offset, pkt_slc_int.value);
0231
0232
0233 enable_pkt_solicit_port(ndev, port);
0234 }
0235
0236 void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
0237 {
0238 int i;
0239
0240 for (i = 0; i < ndev->nr_queues; i++)
0241 config_pkt_solicit_port(ndev, i);
0242 }
0243
0244
0245
0246
0247
0248
0249
0250 static void enable_nps_core_interrupts(struct nitrox_device *ndev)
0251 {
0252 union nps_core_int_ena_w1s core_int;
0253
0254
0255 core_int.value = 0;
0256 core_int.s.host_wr_err = 1;
0257 core_int.s.host_wr_timeout = 1;
0258 core_int.s.exec_wr_timeout = 1;
0259 core_int.s.npco_dma_malform = 1;
0260 core_int.s.host_nps_wr_err = 1;
0261 nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);
0262 }
0263
0264 void nitrox_config_nps_core_unit(struct nitrox_device *ndev)
0265 {
0266 union nps_core_gbl_vfcfg core_gbl_vfcfg;
0267
0268
0269 nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);
0270
0271
0272 core_gbl_vfcfg.value = 0;
0273 core_gbl_vfcfg.s.ilk_disable = 1;
0274 core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
0275 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
0276
0277
0278 enable_nps_core_interrupts(ndev);
0279 }
0280
0281
0282
0283
0284
0285
0286
0287 static void enable_nps_pkt_interrupts(struct nitrox_device *ndev)
0288 {
0289
0290 nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
0291 nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
0292 nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
0293
0294 nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
0295 nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
0296 nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL));
0297 }
0298
/**
 * nitrox_config_nps_pkt_unit - configure the NPS packet unit
 * @ndev: NITROX device
 */
void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev)
{
	/* bring up the input rings and the solicit ports... */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* ...then enable the packet error interrupts */
	enable_nps_pkt_interrupts(ndev);
}
0308
/**
 * reset_aqm_ring - disable an AQM ring and clear its completion count
 * @ndev: NITROX device
 * @ring: AQM ring number
 */
static void reset_aqm_ring(struct nitrox_device *ndev, int ring)
{
	union aqmq_en aqmq_en_reg;
	union aqmq_activity_stat activity_stat;
	union aqmq_cmp_cnt cmp_cnt;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* step 1: disable the queue */
	offset = AQMQ_ENX(ring);
	aqmq_en_reg.value = 0;
	aqmq_en_reg.queue_enable = 0;
	nitrox_write_csr(ndev, offset, aqmq_en_reg.value);

	/* step 2: bounded poll until the queue goes inactive */
	usleep_range(100, 150);
	offset = AQMQ_ACTIVITY_STATX(ring);
	do {
		activity_stat.value = nitrox_read_csr(ndev, offset);
		if (!activity_stat.queue_active)
			break;
		udelay(50);
	} while (max_retries--);

	/* step 3: write the completion count back to itself
	 * (NOTE(review): presumably write-1-to-clear — confirm against
	 * the CSR specification)
	 */
	offset = AQMQ_CMP_CNTX(ring);
	cmp_cnt.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, cmp_cnt.value);
	usleep_range(50, 100);
}
0339
0340 void enable_aqm_ring(struct nitrox_device *ndev, int ring)
0341 {
0342 union aqmq_en aqmq_en_reg;
0343 u64 offset;
0344
0345 offset = AQMQ_ENX(ring);
0346 aqmq_en_reg.value = 0;
0347 aqmq_en_reg.queue_enable = 1;
0348 nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
0349 usleep_range(50, 100);
0350 }
0351
/**
 * nitrox_config_aqm_rings - configure all AQM rings
 * @ndev: NITROX device
 *
 * For each queue: reset the ring, write the doorbell field, clear the
 * next-command pointer, program the base address, size and completion
 * threshold, then re-enable the ring.
 */
void nitrox_config_aqm_rings(struct nitrox_device *ndev)
{
	int ring;

	for (ring = 0; ring < ndev->nr_queues; ring++) {
		struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
		union aqmq_drbl drbl;
		union aqmq_qsz qsize;
		union aqmq_cmp_thr cmp_thr;
		u64 offset;

		/* quiesce the ring before reprogramming it */
		reset_aqm_ring(ndev, ring);

		/* write all-ones to the doorbell count
		 * (NOTE(review): presumably clears it — confirm)
		 */
		offset = AQMQ_DRBLX(ring);
		drbl.value = 0;
		drbl.dbell_count = 0xFFFFFFFF;
		nitrox_write_csr(ndev, offset, drbl.value);

		/* reset the next-command register */
		offset = AQMQ_NXT_CMDX(ring);
		nitrox_write_csr(ndev, offset, 0ULL);

		/* ring base address: DMA address of the command queue */
		offset = AQMQ_BADRX(ring);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* ring size in entries */
		offset = AQMQ_QSZX(ring);
		qsize.value = 0;
		qsize.host_queue_size = ndev->qlen;
		nitrox_write_csr(ndev, offset, qsize.value);

		/* completion threshold of 1 (per the CSR field name) */
		offset = AQMQ_CMP_THRX(ring);
		cmp_thr.value = 0;
		cmp_thr.commands_completed_threshold = 1;
		nitrox_write_csr(ndev, offset, cmp_thr.value);

		/* bring the ring back up */
		enable_aqm_ring(ndev, ring);
	}
}
0398
0399 static void enable_aqm_interrupts(struct nitrox_device *ndev)
0400 {
0401
0402 nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL));
0403 nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL));
0404 nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL));
0405 nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL));
0406 nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL));
0407 nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL));
0408 nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL));
0409 nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL));
0410 }
0411
/**
 * nitrox_config_aqm_unit - configure the AQM unit
 * @ndev: NITROX device
 */
void nitrox_config_aqm_unit(struct nitrox_device *ndev)
{
	/* set up every AQM ring... */
	nitrox_config_aqm_rings(ndev);

	/* ...then enable the AQM error interrupts */
	enable_aqm_interrupts(ndev);
}
0420
0421 void nitrox_config_pom_unit(struct nitrox_device *ndev)
0422 {
0423 union pom_int_ena_w1s pom_int;
0424 int i;
0425
0426
0427 pom_int.value = 0;
0428 pom_int.s.illegal_dport = 1;
0429 nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);
0430
0431
0432 for (i = 0; i < ndev->hw.se_cores; i++)
0433 nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
0434 }
0435
0436
0437
0438
0439
0440 void nitrox_config_rand_unit(struct nitrox_device *ndev)
0441 {
0442 union efl_rnm_ctl_status efl_rnm_ctl;
0443 u64 offset;
0444
0445 offset = EFL_RNM_CTL_STATUS;
0446 efl_rnm_ctl.value = nitrox_read_csr(ndev, offset);
0447 efl_rnm_ctl.s.ent_en = 1;
0448 efl_rnm_ctl.s.rng_en = 1;
0449 nitrox_write_csr(ndev, offset, efl_rnm_ctl.value);
0450 }
0451
/**
 * nitrox_config_efl_unit - configure EFL unit interrupts
 * @ndev: NITROX device
 */
void nitrox_config_efl_unit(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int_ena_w1s efl_core_int;
		u64 offset;

		/* enable the per-cluster core error interrupt sources
		 * (conditions per the CSR field names)
		 */
		offset = EFL_CORE_INT_ENA_W1SX(i);
		efl_core_int.value = 0;
		efl_core_int.s.len_ovr = 1;
		efl_core_int.s.d_left = 1;
		efl_core_int.s.epci_decode_err = 1;
		nitrox_write_csr(ndev, offset, efl_core_int.value);

		/* enable all bits in both VF error interrupt banks */
		offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
		offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
	}
}
0474
/**
 * nitrox_config_bmi_unit - configure the BMI unit
 * @ndev: NITROX device
 */
void nitrox_config_bmi_unit(struct nitrox_device *ndev)
{
	union bmi_ctl bmi_ctl;
	union bmi_int_ena_w1s bmi_int_ena;
	u64 offset;

	/* read-modify-write the BMI control thresholds
	 * (NOTE(review): the constants 0xff/0x7a are taken as given —
	 * confirm against the CSR specification)
	 */
	offset = BMI_CTL;
	bmi_ctl.value = nitrox_read_csr(ndev, offset);
	bmi_ctl.s.max_pkt_len = 0xff;
	bmi_ctl.s.nps_free_thrsh = 0xff;
	bmi_ctl.s.nps_hdrq_thrsh = 0x7a;
	nitrox_write_csr(ndev, offset, bmi_ctl.value);

	/* enable the BMI error interrupt sources
	 * (conditions per the CSR field names)
	 */
	offset = BMI_INT_ENA_W1S;
	bmi_int_ena.value = 0;
	bmi_int_ena.s.max_len_err_nps = 1;
	bmi_int_ena.s.pkt_rcv_err_nps = 1;
	bmi_int_ena.s.fpf_undrrn = 1;
	nitrox_write_csr(ndev, offset, bmi_int_ena.value);
}
0497
0498 void nitrox_config_bmo_unit(struct nitrox_device *ndev)
0499 {
0500 union bmo_ctl2 bmo_ctl2;
0501 u64 offset;
0502
0503
0504 offset = BMO_CTL2;
0505 bmo_ctl2.value = nitrox_read_csr(ndev, offset);
0506 bmo_ctl2.s.nps_slc_buf_thrsh = 0xff;
0507 nitrox_write_csr(ndev, offset, bmo_ctl2.value);
0508 }
0509
/**
 * invalidate_lbc - start LBC CAM invalidation and wait for completion
 * @ndev: NITROX device
 */
void invalidate_lbc(struct nitrox_device *ndev)
{
	union lbc_inval_ctl lbc_ctl;
	union lbc_inval_status lbc_stat;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* kick off the CAM invalidation */
	offset = LBC_INVAL_CTL;
	lbc_ctl.value = nitrox_read_csr(ndev, offset);
	lbc_ctl.s.cam_inval_start = 1;
	nitrox_write_csr(ndev, offset, lbc_ctl.value);

	/* bounded poll for the done bit; gives up silently after
	 * MAX_CSR_RETRIES attempts
	 */
	offset = LBC_INVAL_STATUS;
	do {
		lbc_stat.value = nitrox_read_csr(ndev, offset);
		if (lbc_stat.s.done)
			break;
		udelay(50);
	} while (max_retries--);
}
0531
/**
 * nitrox_config_lbc_unit - configure the LBC unit
 * @ndev: NITROX device
 */
void nitrox_config_lbc_unit(struct nitrox_device *ndev)
{
	union lbc_int_ena_w1s lbc_int_ena;
	u64 offset;

	/* invalidate the LBC before enabling interrupts */
	invalidate_lbc(ndev);

	/* enable the LBC error interrupt sources
	 * (conditions per the CSR field names)
	 */
	offset = LBC_INT_ENA_W1S;
	lbc_int_ena.value = 0;
	lbc_int_ena.s.dma_rd_err = 1;
	lbc_int_ena.s.over_fetch_err = 1;
	lbc_int_ena.s.cam_inval_abort = 1;
	lbc_int_ena.s.cam_hard_err = 1;
	nitrox_write_csr(ndev, offset, lbc_int_ena.value);

	/* enable all PLM interrupts for VFs 1-64 and 65-128 */
	offset = LBC_PLM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_PLM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));

	/* likewise for both ELM interrupt banks */
	offset = LBC_ELM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_ELM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
}
0558
0559 void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
0560 {
0561 union nps_core_gbl_vfcfg vfcfg;
0562
0563 vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
0564 vfcfg.s.cfg = mode & 0x7;
0565
0566 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
0567 }
0568
0569 static const char *get_core_option(u8 se_cores, u8 ae_cores)
0570 {
0571 const char *option = "";
0572
0573 if (ae_cores == AE_MAX_CORES) {
0574 switch (se_cores) {
0575 case SE_MAX_CORES:
0576 option = "60";
0577 break;
0578 case 40:
0579 option = "60s";
0580 break;
0581 }
0582 } else if (ae_cores == (AE_MAX_CORES / 2)) {
0583 option = "30";
0584 } else {
0585 option = "60i";
0586 }
0587
0588 return option;
0589 }
0590
0591 static const char *get_feature_option(u8 zip_cores, int core_freq)
0592 {
0593 if (zip_cores == 0)
0594 return "";
0595 else if (zip_cores < ZIP_MAX_CORES)
0596 return "-C15";
0597
0598 if (core_freq >= 850)
0599 return "-C45";
0600 else if (core_freq >= 750)
0601 return "-C35";
0602 else if (core_freq >= 550)
0603 return "-C25";
0604
0605 return "";
0606 }
0607
0608 void nitrox_get_hwinfo(struct nitrox_device *ndev)
0609 {
0610 union emu_fuse_map emu_fuse;
0611 union rst_boot rst_boot;
0612 union fus_dat1 fus_dat1;
0613 unsigned char name[IFNAMSIZ * 2] = {};
0614 int i, dead_cores;
0615 u64 offset;
0616
0617
0618 offset = RST_BOOT;
0619 rst_boot.value = nitrox_read_csr(ndev, offset);
0620 ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
0621
0622 for (i = 0; i < NR_CLUSTERS; i++) {
0623 offset = EMU_FUSE_MAPX(i);
0624 emu_fuse.value = nitrox_read_csr(ndev, offset);
0625 if (emu_fuse.s.valid) {
0626 dead_cores = hweight32(emu_fuse.s.ae_fuse);
0627 ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
0628 dead_cores = hweight16(emu_fuse.s.se_fuse);
0629 ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
0630 }
0631 }
0632
0633 offset = FUS_DAT1;
0634 fus_dat1.value = nitrox_read_csr(ndev, offset);
0635 if (!fus_dat1.nozip) {
0636 dead_cores = hweight8(fus_dat1.zip_info);
0637 ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
0638 }
0639
0640
0641
0642
0643 snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
0644 get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
0645 ndev->hw.freq,
0646 get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
0647 ndev->hw.revision_id);
0648
0649
0650 strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
0651 }
0652
0653 void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
0654 {
0655 u64 value = ~0ULL;
0656 u64 reg_addr;
0657
0658
0659 reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S;
0660 nitrox_write_csr(ndev, reg_addr, value);
0661
0662
0663 reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S;
0664 nitrox_write_csr(ndev, reg_addr, value);
0665 }
0666
0667 void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
0668 {
0669 u64 value = ~0ULL;
0670 u64 reg_addr;
0671
0672
0673 reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C;
0674 nitrox_write_csr(ndev, reg_addr, value);
0675
0676
0677 reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C;
0678 nitrox_write_csr(ndev, reg_addr, value);
0679 }