// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define FDMA_XTR_CHANNEL		6
#define FDMA_INJ_CHANNEL		0

#define FDMA_DCB_INFO_DATAL(x)		((x) & GENMASK(15, 0))
#define FDMA_DCB_INFO_TOKEN		BIT(17)
#define FDMA_DCB_INFO_INTR		BIT(18)
#define FDMA_DCB_INFO_SW(x)		(((x) << 24) & GENMASK(31, 24))

#define FDMA_DCB_STATUS_BLOCKL(x)	((x) & GENMASK(15, 0))
#define FDMA_DCB_STATUS_SOF		BIT(16)
#define FDMA_DCB_STATUS_EOF		BIT(17)
#define FDMA_DCB_STATUS_INTR		BIT(18)
#define FDMA_DCB_STATUS_DONE		BIT(19)
#define FDMA_DCB_STATUS_BLOCKO(x)	(((x) << 20) & GENMASK(31, 20))
#define FDMA_DCB_INVALID_DATA		0x1

#define FDMA_XTR_BUFFER_SIZE		2048
#define FDMA_WEIGHT			4
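
/* Frame DMA (FDMA) DCB layout
 *
 * Each DCB (DMA Control Block) holds a pointer to the next DCB, an info
 * word and a number of data blocks (DBs). Each DB consists of a data
 * pointer and a status word. The DCBs are chained into a linked list, and
 * the address of the first DCB is written to the channel LLP/LLP1
 * registers when the channel is activated.
 */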
struct sparx5_db {
	struct list_head list;
	void *cpu_addr;
};

static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx,
				   struct sparx5_rx_dcb_hw *dcb,
				   u64 nextptr)
{
	int idx = 0;

	/* Reset the status of the DBs */
	for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) {
		struct sparx5_db_hw *db = &dcb->db[idx];

		db->status = FDMA_DCB_STATUS_INTR;
	}
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}

static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx,
				   struct sparx5_tx_dcb_hw *dcb,
				   u64 nextptr)
{
	int idx = 0;

	/* Reset the status of the DBs */
	for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) {
		struct sparx5_db_hw *db = &dcb->db[idx];

		db->status = FDMA_DCB_STATUS_DONE;
	}
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
}

static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(rx->channel_id));
	spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id));

	/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
		sparx5, FDMA_CH_CFG(rx->channel_id));

	/* Set the RX extraction FIFO watermark */
	spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
		 sparx5,
		 FDMA_XTR_CFG);

	/* Start RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Enable RX channel DB interrupt */
	spx5_rmw(BIT(rx->channel_id),
		 BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Activate the RX channel */
	spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Deactivate the RX channel */
	spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);

	/* Disable RX channel DB interrupt */
	spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Stop RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));
}

static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(tx->channel_id));
	spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id));

	/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
		sparx5, FDMA_CH_CFG(tx->channel_id));

	/* Start TX fdma */
	spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Activate the TX channel */
	spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Deactivate the TX channel */
	spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Reload the RX channel */
	spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD);
}

static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Reload the TX channel */
	spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD);
}

static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx)
{
	return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE,
				  GFP_ATOMIC);
}

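/* Check the current RX DB for a completed frame; if one is present, pass
 * the skb holding it up the stack and replace it in the DB with a freshly
 * allocated skb so the DB can be handed back to the hardware.
 */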
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct sparx5_db_hw *db_hw;
	unsigned int packet_size;
	struct sparx5_port *port;
	struct sk_buff *new_skb;
	struct frame_info fi;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	/* Check if the DCB is done */
	db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE)))
		return false;
	skb = rx->skb[rx->dcb_index][rx->db_index];
	/* Replace the DB entry with a new SKB */
	new_skb = sparx5_fdma_rx_alloc_skb(rx);
	if (unlikely(!new_skb))
		return false;
	/* Map the new skb data and set the new skb */
	dma_addr = virt_to_phys(new_skb->data);
	rx->skb[rx->dcb_index][rx->db_index] = new_skb;
	db_hw->dataptr = dma_addr;
	packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status);
	skb_put(skb, packet_size);
	/* Now do the normal processing of the skb */
	sparx5_ifh_parse((u32 *)skb->data, &fi);
	/* Map to port netdev */
	port = fi.src_port < SPX5_PORTS ? sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, XTR_QUEUE);
		return false;
	}
	skb->dev = port->ndev;
	skb_pull(skb, IFH_LEN * sizeof(u32));
	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);
	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;
	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;
	rx->packets++;
	netif_receive_skb(skb);
	return true;
}

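/* NAPI poll: receive up to "weight" frames, recycle fully consumed DCBs
 * back to the hardware, and re-enable the DB interrupt when done.
 */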
static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	int counter = 0;

	while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
		struct sparx5_rx_dcb_hw *old_dcb;

		rx->db_index++;
		counter++;
		/* Check if the DCB can be reused */
		if (rx->db_index != FDMA_RX_DCB_MAX_DBS)
			continue;
		/* As the DCB can be reused, just advance the dcb_index
		 * pointer and set the nextptr in the DCB
		 */
		rx->db_index = 0;
		old_dcb = &rx->dcb_entries[rx->dcb_index];
		rx->dcb_index++;
		rx->dcb_index &= FDMA_DCB_MAX - 1;
		sparx5_fdma_rx_add_dcb(rx, old_dcb,
				       rx->dma +
				       ((unsigned long)old_dcb -
					(unsigned long)rx->dcb_entries));
	}
	if (counter < weight) {
		napi_complete_done(&rx->napi, counter);
		spx5_rmw(BIT(rx->channel_id),
			 BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
			 sparx5, FDMA_INTR_DB_ENA);
	}
	if (counter)
		sparx5_fdma_rx_reload(sparx5, rx);
	return counter;
}

static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx,
						      struct sparx5_tx_dcb_hw *dcb)
{
	struct sparx5_tx_dcb_hw *next_dcb;

	next_dcb = dcb;
	next_dcb++;
	/* Handle wrap-around at the end of the DCB array */
	if ((unsigned long)next_dcb >=
	    ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb)))
		next_dcb = tx->first_entry;
	return next_dcb;
}

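/* Inject a frame: copy the IFH and the frame data into the next TX DB
 * buffer, fill in the DCB status word and hand the DCB chain to the
 * hardware (activate on first use, reload afterwards).
 */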
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
{
	struct sparx5_tx_dcb_hw *next_dcb_hw;
	struct sparx5_tx *tx = &sparx5->tx;
	static bool first_time = true;
	struct sparx5_db_hw *db_hw;
	struct sparx5_db *db;

	next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
	db_hw = &next_dcb_hw->db[0];
	if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
		tx->dropped++;
	db = list_first_entry(&tx->db_list, struct sparx5_db, list);
	list_move_tail(&db->list, &tx->db_list);
	next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
	tx->curr_entry->nextptr = tx->dma +
		((unsigned long)next_dcb_hw -
		 (unsigned long)tx->first_entry);
	tx->curr_entry = next_dcb_hw;
	memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
	memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
	memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
	db_hw->status = FDMA_DCB_STATUS_SOF |
			FDMA_DCB_STATUS_EOF |
			FDMA_DCB_STATUS_BLOCKO(0) |
			FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4);
	if (first_time) {
		sparx5_fdma_tx_activate(sparx5, tx);
		first_time = false;
	} else {
		sparx5_fdma_tx_reload(sparx5, tx);
	}
	return NETDEV_TX_OK;
}

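/* Allocate the RX DCB chain and an skb per DB, point each DB at its skb
 * data buffer, then enable NAPI and activate the RX channel.
 */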
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct sparx5_rx_dcb_hw *dcb;
	int idx, jdx;
	int size;

	size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
	if (!rx->dcb_entries)
		return -ENOMEM;
	rx->dma = virt_to_phys(rx->dcb_entries);
	rx->last_entry = rx->dcb_entries;
	rx->db_index = 0;
	rx->dcb_index = 0;
	/* Now for each dcb allocate the dbs */
	for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
		dcb = &rx->dcb_entries[idx];
		dcb->info = 0;
		/* For each db allocate an skb and map the skb data pointer
		 * to the DB dataptr. This way the received frame ends up
		 * directly in skb->data, so no memcpy is needed.
		 */
		for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) {
			struct sparx5_db_hw *db_hw = &dcb->db[jdx];
			dma_addr_t dma_addr;
			struct sk_buff *skb;

			skb = sparx5_fdma_rx_alloc_skb(rx);
			if (!skb)
				return -ENOMEM;

			dma_addr = virt_to_phys(skb->data);
			db_hw->dataptr = dma_addr;
			db_hw->status = 0;
			rx->skb[idx][jdx] = skb;
		}
		sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx);
	}
	netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
			      FDMA_WEIGHT);
	napi_enable(&rx->napi);
	sparx5_fdma_rx_activate(sparx5, rx);
	return 0;
}

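/* Allocate the TX DCB chain and a data buffer per DB, and track the
 * buffers in tx->db_list for use by the injection path.
 */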
static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct sparx5_tx_dcb_hw *dcb;
	int idx, jdx;
	int size;

	size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
	if (!tx->curr_entry)
		return -ENOMEM;
	tx->dma = virt_to_phys(tx->curr_entry);
	tx->first_entry = tx->curr_entry;
	INIT_LIST_HEAD(&tx->db_list);
	/* Now for each dcb allocate the dbs */
	for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
		dcb = &tx->curr_entry[idx];
		dcb->info = 0;

		for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) {
			struct sparx5_db_hw *db_hw = &dcb->db[jdx];
			struct sparx5_db *db;
			dma_addr_t phys;
			void *cpu_addr;

			cpu_addr = devm_kzalloc(sparx5->dev,
						FDMA_XTR_BUFFER_SIZE,
						GFP_KERNEL);
			if (!cpu_addr)
				return -ENOMEM;
			phys = virt_to_phys(cpu_addr);
			db_hw->dataptr = phys;
			db_hw->status = 0;
			db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
			if (!db)
				return -ENOMEM;
			db->cpu_addr = cpu_addr;
			list_add_tail(&db->list, &tx->db_list);
		}
		sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx);
		/* Let curr_entry point to the last allocated entry */
		if (idx == FDMA_DCB_MAX - 1)
			tx->curr_entry = dcb;
	}
	return 0;
}

static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
				struct sparx5_rx *rx, int channel)
{
	int idx;

	rx->channel_id = channel;
	/* Fetch a netdev for SKB and NAPI use, any will do */
	for (idx = 0; idx < SPX5_PORTS; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			rx->ndev = port->ndev;
			break;
		}
	}
}

static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
				struct sparx5_tx *tx, int channel)
{
	tx->channel_id = channel;
}

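/* FDMA interrupt handler: acknowledge DB interrupts and defer the RX
 * processing to NAPI; log and acknowledge any error interrupts.
 */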
irqreturn_t sparx5_fdma_handler(int irq, void *args)
{
	struct sparx5 *sparx5 = args;
	u32 db = 0, err = 0;

	db = spx5_rd(sparx5, FDMA_INTR_DB);
	err = spx5_rd(sparx5, FDMA_INTR_ERR);

	if (db) {
		spx5_wr(0, sparx5, FDMA_INTR_DB_ENA);
		spx5_wr(db, sparx5, FDMA_INTR_DB);
		napi_schedule(&sparx5->rx.napi);
	}
	if (err) {
		u32 err_type = spx5_rd(sparx5, FDMA_ERRORS);

		dev_err_ratelimited(sparx5->dev,
				    "ERR: int: %#x, type: %#x\n",
				    err, err_type);
		spx5_wr(err, sparx5, FDMA_INTR_ERR);
		spx5_wr(err_type, sparx5, FDMA_ERRORS);
	}
	return IRQ_HANDLED;
}

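/* Switch the extraction and injection queue groups to FDMA operation and
 * set up the two internal CPU ports (ASM, DSM, QFWD and HSCH) for frame
 * transfer.
 */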
static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;
	int urgency;

	/* Change mode to fdma extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
		/* ASM CPU port: no preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1),
			sparx5, ASM_PORT_CFG(portno));

		/* Reset the DSM stop watermark counter */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable the port in the queue system */
		urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500);
		spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
			 QFWD_SWITCH_PORT_MODE_PORT_ENA |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
			 sparx5,
			 QFWD_SWITCH_PORT_MODE(portno));

		/* Disable the Disassembler buffer underrun watchdog
		 * to avoid truncated packets in XTR
		 */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));

		/* Disable frame aging */
		spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1),
			 HSCH_PORT_MODE_AGE_DIS,
			 sparx5,
			 HSCH_PORT_MODE(portno));
	}
}

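/* Bring up the FDMA: reset the block, configure ACP caching, select the
 * FDMA injection/extraction mode and allocate the RX and TX DCB chains.
 */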
int sparx5_fdma_start(struct sparx5 *sparx5)
{
	int err;

	/* Reset FDMA state */
	spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
	spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);

	/* Force ACP caching but disable read/write allocation */
	spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) |
		 CPU_PROC_CTRL_ACP_AWCACHE_SET(0) |
		 CPU_PROC_CTRL_ACP_ARCACHE_SET(0),
		 CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA |
		 CPU_PROC_CTRL_ACP_AWCACHE |
		 CPU_PROC_CTRL_ACP_ARCACHE,
		 sparx5, CPU_PROC_CTRL);

	sparx5_fdma_injection_mode(sparx5);
	sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL);
	sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL);
	err = sparx5_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err);
		return err;
	}
	err = sparx5_fdma_tx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err);
		return err;
	}
	return err;
}

static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
	return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}

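/* Tear down the FDMA: disable NAPI, deactivate both channels and poll the
 * extraction buffer state before returning.
 */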
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
	u32 val;

	napi_disable(&sparx5->rx.napi);
	/* Stop the fdma and channel interrupts */
	sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
	sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
	/* Wait for the RX channel to stop */
	read_poll_timeout(sparx5_fdma_port_ctrl, val,
			  FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
			  500, 10000, 0, sparx5);
	return 0;
}