// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

#define RCB_DEFAULT_BUFFER_SIZE 2048

/**
 * hns_rcb_wait_fbd_clean - wait until the TX/RX FBD counters of all given
 *	queues have drained to zero
 * @qs: array of queue pointers
 * @q_num: number of queues in @qs
 * @flag: RCB_INT_FLAG_TX and/or RCB_INT_FLAG_RX
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);

		/* advance to the next queue once this one has drained */
		if (!fbd_num)
			i++;
		/* give up after 10000 polls (roughly 2-3 seconds) */
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}

/**
 * hns_rcb_wait_tx_ring_clean - wait until the TX ring head pointer catches
 *	up with the tail, i.e. hardware has consumed every pending descriptor
 * @qs: queue to poll
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
{
	u32 head, tail;
	int wait_cnt;

	tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
	wait_cnt = 0;
	while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
		head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
		if (tail == head)
			break;

		usleep_range(100, 200);
	}

	if (wait_cnt >= HNS_MAX_WAIT_CNT) {
		dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * hns_rcb_reset_ring_hw - reset a ring in hardware
 * @q: ring/queue to reset
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);

		/* the ring must be idle (no pending TX FBDs) before reset */
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		/* retry the reset request until hardware reports it can proceed */
		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 * hns_rcb_int_ctrl_hw - mask or unmask ring interrupts
 * @q: hnae queue
 * @flag: RCB_INT_FLAG_TX and/or RCB_INT_FLAG_RX
 * @mask: non-zero to mask (disable) the interrupts, zero to unmask
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	/* the interrupt status bits are cleared by writing 1 */
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}

void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 * hns_rcb_ring_enable_hw - enable or disable ring prefetch
 * @q: rcb ring/queue
 * @val: non-zero to enable, zero to disable
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 * hns_rcb_common_init_commit_hw - signal that rcb common init has completed
 * @rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb();	/* make sure all prior config writes have reached hardware */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb();	/* order the commit before any subsequent access */
}

/**
 * hns_rcb_set_tx_ring_bs - set the TX ring buffer size register
 * @q: hnae queue
 * @buf_size: buffer size to program, in bytes
 */
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, bd_size_type);
}

/**
 * hns_rcb_set_rx_ring_bs - set the RX ring buffer size register
 * @q: hnae queue
 * @buf_size: buffer size to program, in bytes
 */
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
{
	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);

	dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, bd_size_type);
}

/**
 * hns_rcb_ring_init - initialize one ring of a ring pair in hardware
 * @ring_pair: ring pair control block
 * @ring_type: TX_RING or RX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		/* shift in two steps so a 32-bit dma_addr_t stays well defined */
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns_rcb_set_rx_ring_bs(q, ring->buf_size);

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns_rcb_set_tx_ring_bs(q, ring->buf_size);

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm +
			       HNS_RCB_TX_PKTLINE_OFFSET);
	}
}

/**
 * hns_rcb_init_hw - initialize both rings of a ring pair
 * @ring: ring pair control block
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 * hns_rcb_set_port_desc_cnt - set the descriptor count for an rcb port
 * @rcb_common: rcb common device
 * @port_idx: port index in the common block
 * @desc_cnt: BD number to program
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		/* v1 chips count the timeout in clock cycles, not in us */
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	} else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
		/* the per-port gap time is capped at the default value */
		if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       HNS_RCB_DEF_GAP_TIME_USECS);
		else
			dsaf_write_dev(rcb_common,
				       RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
				       timeout);

		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	} else {
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
	}
}
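
/*
 * Worked example (editorial note, not part of the original driver): on a
 * v1 chip the overtime register counts clock cycles, so a 50us timeout is
 * programmed as 50 * HNS_RCB_CLK_FREQ_MHZ. On v2 chips the value is taken
 * in microseconds directly, with the per-port gap time additionally capped
 * at HNS_RCB_DEF_GAP_TIME_USECS.
 */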

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

static void hns_rcb_comm_exc_irq_en(
	struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_value = 0xfffffffful;
	u32 msk_value = en ? 0 : 0xfffffffful;

	/* clear the pending interrupt status */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_value);
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_value);
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_value);
	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_value);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_value);

	/* then unmask or mask the exception interrupts */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_value);
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_value);

	/* TX BDs need no cacheline interrupt, so keep sf_txring_fbd masked */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_value | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_value);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_value);
}

/**
 * hns_rcb_common_init_hw - initialize the rcb common hardware
 * @rcb_common: rcb common device
 *
 * Returns 0 on success, -EBUSY if the hardware init flag is not set.
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (!(reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		hns_rcb_set_rx_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
		if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
		    !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
			hns_rcb_set_tx_coalesced_frames(
				rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
		break;
	}

	return bd_size_type;
}
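
/*
 * Example (illustrative): with the default 2048-byte buffers,
 * hns_rcb_buf_size2type(RCB_DEFAULT_BUFFER_SIZE) returns
 * HNS_BD_SIZE_2048_TYPE, which the hns_rcb_set_*_ring_bs() helpers above
 * program into the ring BD_LEN registers; any size outside
 * {512, 1024, 2048, 4096} yields -EINVAL.
 */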

static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u16 desc_num, mdnum_ppkt;
	int irq_idx;
	bool is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
			     HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
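
/*
 * Worked example (illustrative): in DSAF_MODE_DISABLE_6PORT_0VM,
 * hns_rcb_get_queue_mode() below reports max_vfn = 1 and max_q_per_vf = 16,
 * so rings 0..15 map to port 0, rings 16..31 to port 1, and so on.
 */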

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);
	else
		return HNS_DEBUG_RING_IRQ_IDX;
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
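
/*
 * Example (illustrative): ring 2 of a common block is addressed at
 * RCB_COMM_BASE_TO_RING_BASE(base, 2), i.e.
 * base + 0x10000 + 2 * HNS_RCB_REG_OFFSET.
 */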

/**
 * hns_rcb_get_cfg - get the configuration of all ring pairs in an rcb common
 * @rcb_common: rcb common device
 *
 * Returns 0 on success, -EPROBE_DEFER if an IRQ is not yet available.
 */
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
		/* v1 interleaves TX/RX vectors in pairs; v2 strides by three */
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
			is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
			platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
			is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
			platform_get_irq(pdev, base_irq_idx + i * 3);
		if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
		    (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
			return -EPROBE_DEFER;

		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}

	return 0;
}

/**
 * hns_rcb_get_rx_coalesced_frames - get the RX coalesced-frames setting
 * @rcb_common: rcb common device
 * @port_idx: port index in the common block
 *
 * Returns the current coalesced-frames value.
 */
u32 hns_rcb_get_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 * hns_rcb_get_tx_coalesced_frames - get the TX coalesced-frames setting
 * @rcb_common: rcb common device
 * @port_idx: port index in the common block
 *
 * Returns the current coalesced-frames value.
 */
u32 hns_rcb_get_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	u64 reg;

	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
	return dsaf_read_dev(rcb_common, reg);
}

/**
 * hns_rcb_get_coalesce_usecs - get the interrupt coalescing timeout
 * @rcb_common: rcb common device
 * @port_idx: port index in the common block
 *
 * Returns the timeout in microseconds.
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;
	else
		return dsaf_read_dev(rcb_common,
				     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 * hns_rcb_set_coalesce_usecs - set the interrupt coalescing timeout
 * @rcb_common: rcb common device
 * @port_idx: port index in the common block
 * @timeout: coalescing timeout in microseconds
 *
 * Returns 0 on success, or -EINVAL if the setting is not supported.
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: not support coalesce_usecs setting!\n");
			return -EINVAL;
		}
	}
	if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_usecs setting supports 1~1023us\n");
		return -EINVAL;
	}
	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}

/**
 * hns_rcb_set_tx_coalesced_frames - set the TX coalesced-frames threshold
 * @rcb_common: rcb common device
 * @port_idx: port index in the common block
 * @coalesced_frames: BD threshold for coalesced interrupts
 *
 * Returns 0 on success, or -EINVAL if the setting is not supported.
 */
int hns_rcb_set_tx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline =
		hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
	u64 reg;

	if (coalesced_frames == old_waterline)
		return 0;

	/* only a TX threshold of one frame is supported */
	if (coalesced_frames != 1) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support tx coalesce_frames setting!\n");
		return -EINVAL;
	}

	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
	dsaf_write_dev(rcb_common, reg, coalesced_frames);
	return 0;
}

/**
 * hns_rcb_set_rx_coalesced_frames - set the RX coalesced-frames threshold
 * @rcb_common: rcb common device
 * @port_idx: port index in the common block
 * @coalesced_frames: BD threshold for coalesced interrupts
 *
 * Returns 0 on success, or -EINVAL if the value is out of range.
 */
int hns_rcb_set_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline =
		hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support coalesce_frames setting!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 * hns_rcb_get_queue_mode - get the max VF number and max ring number per VF
 *	for a given dsaf mode
 * @dsaf_mode: dsaf mode
 * @max_vfn: returned max VF number
 * @max_q_per_vf: returned max ring number per VF
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	}
}
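
/*
 * Usage sketch (illustrative, with hypothetical locals):
 *
 *	u16 max_vfn, max_q_per_vf;
 *
 *	hns_rcb_get_queue_mode(DSAF_MODE_DISABLE_2PORT_64VM,
 *			       &max_vfn, &max_q_per_vf);
 *	// max_vfn == 64, max_q_per_vf == 1
 */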

static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail, use default! dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}

static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev,
			     struct_size(rcb_common, ring_pair_cb, ring_num),
			     GFP_KERNEL);
	if (!rcb_common)
		return -ENOMEM;

	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

/**
 * hns_rcb_update_stats - accumulate hardware statistics for a queue
 * @queue: rcb ring/queue
 */
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	/* writing 1 resets the hardware packet record */
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}

/**
 * hns_rcb_get_stats - fill the ethtool statistics buffer for a queue
 * @queue: rcb ring/queue
 * @data: statistic value buffer
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 * hns_rcb_get_ring_sset_count - get the ring string-set count
 * @stringset: ethtool string set
 *
 * Returns the number of ring statistics strings.
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 * hns_rcb_get_common_regs_count - get the rcb common register dump count
 *
 * Returns the number of dumped registers.
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_ring_regs_count - get the per-ring register dump count
 *
 * Returns the number of dumped registers.
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_strings - fill the ring statistics string set
 * @stringset: string set index
 * @data: string name buffer
 * @index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	u8 *buff = data;

	if (stringset != ETH_SS_STATS)
		return;

	ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index);
	ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index);
	ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index);
	ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index);

	ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index);
	ethtool_sprintf(&buff, "tx_ring%d_bytes", index);
	ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index);
	ethtool_sprintf(&buff, "tx_ring%d_io_err", index);
	ethtool_sprintf(&buff, "tx_ring%d_sw_err", index);
	ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index);
	ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index);
	ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index);

	ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index);
	ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index);
	ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index);
	ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index);

	ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index);
	ethtool_sprintf(&buff, "rx_ring%d_bytes", index);
	ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index);
	ethtool_sprintf(&buff, "rx_ring%d_io_err", index);
	ethtool_sprintf(&buff, "rx_ring%d_sw_err", index);
	ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index);
	ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index);
	ethtool_sprintf(&buff, "rx_ring%d_len_err", index);
	ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index);
	ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index);
	ethtool_sprintf(&buff, "rx_ring%d_l2_err", index);
	ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index);
}

void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i;

	/* rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry (per-port) registers */
	for (i = 0; i < 16; i++) {
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	/* per-port overtime registers sit at a stride of 4 bytes */
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp + 4 * i);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}

void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i;

	/* rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	/* note: regs[14] is not written here */
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}