0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Driver for Marvell PPv2 network controller for Armada 375 SoC.
0004  *
0005  * Copyright (C) 2014 Marvell
0006  *
0007  * Marcin Wojtas <mw@semihalf.com>
0008  */
0009
0010 #include <linux/acpi.h>
0011 #include <linux/kernel.h>
0012 #include <linux/netdevice.h>
0013 #include <linux/etherdevice.h>
0014 #include <linux/platform_device.h>
0015 #include <linux/skbuff.h>
0016 #include <linux/inetdevice.h>
0017 #include <linux/mbus.h>
0018 #include <linux/module.h>
0019 #include <linux/mfd/syscon.h>
0020 #include <linux/interrupt.h>
0021 #include <linux/cpumask.h>
0022 #include <linux/of.h>
0023 #include <linux/of_irq.h>
0024 #include <linux/of_mdio.h>
0025 #include <linux/of_net.h>
0026 #include <linux/of_address.h>
0027 #include <linux/of_device.h>
0028 #include <linux/phy.h>
0029 #include <linux/phylink.h>
0030 #include <linux/phy/phy.h>
0031 #include <linux/ptp_classify.h>
0032 #include <linux/clk.h>
0033 #include <linux/hrtimer.h>
0034 #include <linux/ktime.h>
0035 #include <linux/regmap.h>
0036 #include <uapi/linux/ppp_defs.h>
0037 #include <net/ip.h>
0038 #include <net/ipv6.h>
0039 #include <net/tso.h>
0040 #include <linux/bpf_trace.h>
0041
0042 #include "mvpp2.h"
0043 #include "mvpp2_prs.h"
0044 #include "mvpp2_cls.h"
0045
0046 enum mvpp2_bm_pool_log_num {
0047 MVPP2_BM_SHORT,
0048 MVPP2_BM_LONG,
0049 MVPP2_BM_JUMBO,
0050 MVPP2_BM_POOLS_NUM
0051 };
0052
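/* Per-pool default packet size and buffer count, filled in by
 * mvpp2_setup_bm_pool().
 */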
0053 static struct {
0054 int pkt_size;
0055 int buf_num;
0056 } mvpp2_pools[MVPP2_BM_POOLS_NUM];
0057
0058
0059
0060
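/* Forward declaration, used by the ACPI (non-phylink) start path. */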
0061 static void mvpp2_acpi_start(struct mvpp2_port *port);
0062
0063
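/* RX queue distribution modes */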
0064 #define MVPP2_QDIST_SINGLE_MODE 0
0065 #define MVPP2_QDIST_MULTI_MODE 1
0066
0067 static int queue_mode = MVPP2_QDIST_MULTI_MODE;
0068
0069 module_param(queue_mode, int, 0444);
0070 MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
0071
0072
0073
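/* Utility/helper methods */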
0074 void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
0075 {
0076 writel(data, priv->swth_base[0] + offset);
0077 }
0078
0079 u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
0080 {
0081 return readl(priv->swth_base[0] + offset);
0082 }
0083
0084 static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
0085 {
0086 return readl_relaxed(priv->swth_base[0] + offset);
0087 }
0088
0089 static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
0090 {
0091 return cpu % priv->nthreads;
0092 }
0093
0094 static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
0095 {
0096 writel(data, priv->cm3_base + offset);
0097 }
0098
0099 static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
0100 {
0101 return readl(priv->cm3_base + offset);
0102 }
0103
0104 static struct page_pool *
0105 mvpp2_create_page_pool(struct device *dev, int num, int len,
0106 enum dma_data_direction dma_dir)
0107 {
0108 struct page_pool_params pp_params = {
0109
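/* Have the page_pool map buffers for DMA and sync them for the device */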
0110 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
0111 .pool_size = num,
0112 .nid = NUMA_NO_NODE,
0113 .dev = dev,
0114 .dma_dir = dma_dir,
0115 .offset = MVPP2_SKB_HEADROOM,
0116 .max_len = len,
0117 };
0118
0119 return page_pool_create(&pp_params);
0120 }
0157
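/* Per-thread register accessors: each software thread has its own register
 * window (swth_base[thread]). These must be used for registers that are
 * banked per thread, e.g. the BM allocation/release registers and the
 * per-thread interrupt cause/mask registers.
 */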
0158 static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
0159 u32 offset, u32 data)
0160 {
0161 writel(data, priv->swth_base[thread] + offset);
0162 }
0163
0164 static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
0165 u32 offset)
0166 {
0167 return readl(priv->swth_base[thread] + offset);
0168 }
0169
0170 static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
0171 u32 offset, u32 data)
0172 {
0173 writel_relaxed(data, priv->swth_base[thread] + offset);
0174 }
0175
0176 static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
0177 u32 offset)
0178 {
0179 return readl_relaxed(priv->swth_base[thread] + offset);
0180 }
0181
0182 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
0183 struct mvpp2_tx_desc *tx_desc)
0184 {
0185 if (port->priv->hw_version == MVPP21)
0186 return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
0187 else
0188 return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
0189 MVPP2_DESC_DMA_MASK;
0190 }
0191
0192 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
0193 struct mvpp2_tx_desc *tx_desc,
0194 dma_addr_t dma_addr)
0195 {
0196 dma_addr_t addr, offset;
0197
0198 addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
0199 offset = dma_addr & MVPP2_TX_DESC_ALIGN;
0200
0201 if (port->priv->hw_version == MVPP21) {
0202 tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
0203 tx_desc->pp21.packet_offset = offset;
0204 } else {
0205 __le64 val = cpu_to_le64(addr);
0206
0207 tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
0208 tx_desc->pp22.buf_dma_addr_ptp |= val;
0209 tx_desc->pp22.packet_offset = offset;
0210 }
0211 }
0212
0213 static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
0214 struct mvpp2_tx_desc *tx_desc)
0215 {
0216 if (port->priv->hw_version == MVPP21)
0217 return le16_to_cpu(tx_desc->pp21.data_size);
0218 else
0219 return le16_to_cpu(tx_desc->pp22.data_size);
0220 }
0221
0222 static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
0223 struct mvpp2_tx_desc *tx_desc,
0224 size_t size)
0225 {
0226 if (port->priv->hw_version == MVPP21)
0227 tx_desc->pp21.data_size = cpu_to_le16(size);
0228 else
0229 tx_desc->pp22.data_size = cpu_to_le16(size);
0230 }
0231
0232 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
0233 struct mvpp2_tx_desc *tx_desc,
0234 unsigned int txq)
0235 {
0236 if (port->priv->hw_version == MVPP21)
0237 tx_desc->pp21.phys_txq = txq;
0238 else
0239 tx_desc->pp22.phys_txq = txq;
0240 }
0241
0242 static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
0243 struct mvpp2_tx_desc *tx_desc,
0244 unsigned int command)
0245 {
0246 if (port->priv->hw_version == MVPP21)
0247 tx_desc->pp21.command = cpu_to_le32(command);
0248 else
0249 tx_desc->pp22.command = cpu_to_le32(command);
0250 }
0251
0252 static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
0253 struct mvpp2_tx_desc *tx_desc)
0254 {
0255 if (port->priv->hw_version == MVPP21)
0256 return tx_desc->pp21.packet_offset;
0257 else
0258 return tx_desc->pp22.packet_offset;
0259 }
0260
0261 static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
0262 struct mvpp2_rx_desc *rx_desc)
0263 {
0264 if (port->priv->hw_version == MVPP21)
0265 return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
0266 else
0267 return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
0268 MVPP2_DESC_DMA_MASK;
0269 }
0270
0271 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
0272 struct mvpp2_rx_desc *rx_desc)
0273 {
0274 if (port->priv->hw_version == MVPP21)
0275 return le32_to_cpu(rx_desc->pp21.buf_cookie);
0276 else
0277 return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
0278 MVPP2_DESC_DMA_MASK;
0279 }
0280
0281 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
0282 struct mvpp2_rx_desc *rx_desc)
0283 {
0284 if (port->priv->hw_version == MVPP21)
0285 return le16_to_cpu(rx_desc->pp21.data_size);
0286 else
0287 return le16_to_cpu(rx_desc->pp22.data_size);
0288 }
0289
0290 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
0291 struct mvpp2_rx_desc *rx_desc)
0292 {
0293 if (port->priv->hw_version == MVPP21)
0294 return le32_to_cpu(rx_desc->pp21.status);
0295 else
0296 return le32_to_cpu(rx_desc->pp22.status);
0297 }
0298
0299 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
0300 {
0301 txq_pcpu->txq_get_index++;
0302 if (txq_pcpu->txq_get_index == txq_pcpu->size)
0303 txq_pcpu->txq_get_index = 0;
0304 }
0305
0306 static void mvpp2_txq_inc_put(struct mvpp2_port *port,
0307 struct mvpp2_txq_pcpu *txq_pcpu,
0308 void *data,
0309 struct mvpp2_tx_desc *tx_desc,
0310 enum mvpp2_tx_buf_type buf_type)
0311 {
0312 struct mvpp2_txq_pcpu_buf *tx_buf =
0313 txq_pcpu->buffs + txq_pcpu->txq_put_index;
0314 tx_buf->type = buf_type;
0315 if (buf_type == MVPP2_TYPE_SKB)
0316 tx_buf->skb = data;
0317 else
0318 tx_buf->xdpf = data;
0319 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
0320 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
0321 mvpp2_txdesc_offset_get(port, tx_desc);
0322 txq_pcpu->txq_put_index++;
0323 if (txq_pcpu->txq_put_index == txq_pcpu->size)
0324 txq_pcpu->txq_put_index = 0;
0325 }
0326
0327
0328 static int mvpp2_get_nrxqs(struct mvpp2 *priv)
0329 {
0330 unsigned int nrxqs;
0331
0332 if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
0333 return 1;
0334
0335
0336
0337
0338
0339
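/* RX queues have to be configured in blocks of four, so round the CPU
 * count up to a multiple of 4.
 */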
0340 nrxqs = (num_possible_cpus() + 3) & ~0x3;
0341 if (nrxqs > MVPP2_PORT_MAX_RXQ)
0342 nrxqs = MVPP2_PORT_MAX_RXQ;
0343
0344 return nrxqs;
0345 }
0346
0347
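/* Get number of physical egress port */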
0348 static inline int mvpp2_egress_port(struct mvpp2_port *port)
0349 {
0350 return MVPP2_MAX_TCONT + port->id;
0351 }
0352
0353
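/* Get number of physical TXQ */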
0354 static inline int mvpp2_txq_phys(int port, int txq)
0355 {
0356 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
0357 }
0358
0359
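/* Allocate a receive buffer, either from the page_pool or from the
 * page-fragment/kmalloc allocators.
 */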
0360 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
0361 struct page_pool *page_pool)
0362 {
0363 if (page_pool)
0364 return page_pool_dev_alloc_pages(page_pool);
0365
0366 if (likely(pool->frag_size <= PAGE_SIZE))
0367 return netdev_alloc_frag(pool->frag_size);
0368
0369 return kmalloc(pool->frag_size, GFP_ATOMIC);
0370 }
0371
0372 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
0373 struct page_pool *page_pool, void *data)
0374 {
0375 if (page_pool)
0376 page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
0377 else if (likely(pool->frag_size <= PAGE_SIZE))
0378 skb_free_frag(data);
0379 else
0380 kfree(data);
0381 }
0382
0383
0384
0385
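/* Create a pool of buffer pointers in DMA memory and start the pool in
 * hardware.
 */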
0386 static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
0387 struct mvpp2_bm_pool *bm_pool, int size)
0388 {
0389 u32 val;
0390
0391
0392
0393
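/* The number of buffer pointers must be a multiple of 16 */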
0394 if (!IS_ALIGNED(size, 16))
0395 return -EINVAL;
0396
0397
0398
0399
0400 if (priv->hw_version == MVPP21)
0401 bm_pool->size_bytes = 2 * sizeof(u32) * size;
0402 else
0403 bm_pool->size_bytes = 2 * sizeof(u64) * size;
0404
0405 bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
0406 &bm_pool->dma_addr,
0407 GFP_KERNEL);
0408 if (!bm_pool->virt_addr)
0409 return -ENOMEM;
0410
0411 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
0412 MVPP2_BM_POOL_PTR_ALIGN)) {
0413 dma_free_coherent(dev, bm_pool->size_bytes,
0414 bm_pool->virt_addr, bm_pool->dma_addr);
0415 dev_err(dev, "BM pool %d is not %d bytes aligned\n",
0416 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
0417 return -ENOMEM;
0418 }
0419
0420 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
0421 lower_32_bits(bm_pool->dma_addr));
0422 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
0423
0424 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
0425 val |= MVPP2_BM_START_MASK;
0426
0427 val &= ~MVPP2_BM_LOW_THRESH_MASK;
0428 val &= ~MVPP2_BM_HIGH_THRESH_MASK;
0429
0430
0431 if (priv->hw_version == MVPP23) {
0432 val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
0433 val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
0434 } else {
0435 val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
0436 val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
0437 }
0438
0439 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
0440
0441 bm_pool->size = size;
0442 bm_pool->pkt_size = 0;
0443 bm_pool->buf_num = 0;
0444
0445 return 0;
0446 }
0447
0448
0449 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
0450 struct mvpp2_bm_pool *bm_pool,
0451 int buf_size)
0452 {
0453 u32 val;
0454
0455 bm_pool->buf_size = buf_size;
0456
0457 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
0458 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
0459 }
0460
0461 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
0462 struct mvpp2_bm_pool *bm_pool,
0463 dma_addr_t *dma_addr,
0464 phys_addr_t *phys_addr)
0465 {
0466 unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
0467
0468 *dma_addr = mvpp2_thread_read(priv, thread,
0469 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
0470 *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
0471
0472 if (priv->hw_version >= MVPP22) {
0473 u32 val;
0474 u32 dma_addr_highbits, phys_addr_highbits;
0475
0476 val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
0477 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
0478 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
0479 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
0480
0481 if (sizeof(dma_addr_t) == 8)
0482 *dma_addr |= (u64)dma_addr_highbits << 32;
0483
0484 if (sizeof(phys_addr_t) == 8)
0485 *phys_addr |= (u64)phys_addr_highbits << 32;
0486 }
0487
0488 put_cpu();
0489 }
0490
0491
0492 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
0493 struct mvpp2_bm_pool *bm_pool, int buf_num)
0494 {
0495 struct page_pool *pp = NULL;
0496 int i;
0497
0498 if (buf_num > bm_pool->buf_num) {
0499 WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
0500 bm_pool->id, buf_num);
0501 buf_num = bm_pool->buf_num;
0502 }
0503
0504 if (priv->percpu_pools)
0505 pp = priv->page_pool[bm_pool->id];
0506
0507 for (i = 0; i < buf_num; i++) {
0508 dma_addr_t buf_dma_addr;
0509 phys_addr_t buf_phys_addr;
0510 void *data;
0511
0512 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
0513 &buf_dma_addr, &buf_phys_addr);
0514
0515 if (!pp)
0516 dma_unmap_single(dev, buf_dma_addr,
0517 bm_pool->buf_size, DMA_FROM_DEVICE);
0518
0519 data = (void *)phys_to_virt(buf_phys_addr);
0520 if (!data)
0521 break;
0522
0523 mvpp2_frag_free(bm_pool, pp, data);
0524 }
0525
0526
0527 bm_pool->buf_num -= i;
0528 }
0529
0530
0531 static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
0532 {
0533 int buf_num = 0;
0534
0535 buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
0536 MVPP22_BM_POOL_PTRS_NUM_MASK;
0537 buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
0538 MVPP2_BM_BPPI_PTR_NUM_MASK;
0539
0540
0541 if (buf_num)
0542 buf_num += 1;
0543
0544 return buf_num;
0545 }
0546
0547
0548 static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
0549 struct mvpp2_bm_pool *bm_pool)
0550 {
0551 int buf_num;
0552 u32 val;
0553
0554 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
0555 mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
0556
0557
0558 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
0559 if (buf_num) {
0560 WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
0561 bm_pool->id, bm_pool->buf_num);
0562 return 0;
0563 }
0564
0565 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
0566 val |= MVPP2_BM_STOP_MASK;
0567 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
0568
0569 if (priv->percpu_pools) {
0570 page_pool_destroy(priv->page_pool[bm_pool->id]);
0571 priv->page_pool[bm_pool->id] = NULL;
0572 }
0573
0574 dma_free_coherent(dev, bm_pool->size_bytes,
0575 bm_pool->virt_addr,
0576 bm_pool->dma_addr);
0577 return 0;
0578 }
0579
0580 static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
0581 {
0582 int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
0583 struct mvpp2_bm_pool *bm_pool;
0584
0585 if (priv->percpu_pools)
0586 poolnum = mvpp2_get_nrxqs(priv) * 2;
0587
0588
0589 size = MVPP2_BM_POOL_SIZE_MAX;
0590 for (i = 0; i < poolnum; i++) {
0591 bm_pool = &priv->bm_pools[i];
0592 bm_pool->id = i;
0593 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
0594 if (err)
0595 goto err_unroll_pools;
0596 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
0597 }
0598 return 0;
0599
0600 err_unroll_pools:
0601 dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
0602 for (i = i - 1; i >= 0; i--)
0603 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
0604 return err;
0605 }
0606
0607
0608 static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
0609 {
0610 int val;
0611
0612 val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
0613 val |= MVPP23_BM_8POOL_MODE;
0614 mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
0615 }
0616
0617 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
0618 {
0619 enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
0620 int i, err, poolnum = MVPP2_BM_POOLS_NUM;
0621 struct mvpp2_port *port;
0622
0623 if (priv->percpu_pools) {
0624 for (i = 0; i < priv->port_count; i++) {
0625 port = priv->port_list[i];
0626 if (port->xdp_prog) {
0627 dma_dir = DMA_BIDIRECTIONAL;
0628 break;
0629 }
0630 }
0631
0632 poolnum = mvpp2_get_nrxqs(priv) * 2;
0633 for (i = 0; i < poolnum; i++) {
0634
0635 int pn = i / (poolnum / 2);
0636
0637 priv->page_pool[i] =
0638 mvpp2_create_page_pool(dev,
0639 mvpp2_pools[pn].buf_num,
0640 mvpp2_pools[pn].pkt_size,
0641 dma_dir);
0642 if (IS_ERR(priv->page_pool[i])) {
0643 int j;
0644
0645 for (j = 0; j < i; j++) {
0646 page_pool_destroy(priv->page_pool[j]);
0647 priv->page_pool[j] = NULL;
0648 }
0649 return PTR_ERR(priv->page_pool[i]);
0650 }
0651 }
0652 }
0653
0654 dev_info(dev, "using %d %s buffers\n", poolnum,
0655 priv->percpu_pools ? "per-cpu" : "shared");
0656
0657 for (i = 0; i < poolnum; i++) {
0658
0659 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
0660
0661 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
0662 }
0663
0664
0665 priv->bm_pools = devm_kcalloc(dev, poolnum,
0666 sizeof(*priv->bm_pools), GFP_KERNEL);
0667 if (!priv->bm_pools)
0668 return -ENOMEM;
0669
0670 if (priv->hw_version == MVPP23)
0671 mvpp23_bm_set_8pool_mode(priv);
0672
0673 err = mvpp2_bm_pools_init(dev, priv);
0674 if (err < 0)
0675 return err;
0676 return 0;
0677 }
0678
0679 static void mvpp2_setup_bm_pool(void)
0680 {
0681
0682 mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
0683 mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
0684
0685
0686 mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
0687 mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
0688
0689
0690 mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
0691 mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
0692 }
0693
0694
0695 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
0696 int lrxq, int long_pool)
0697 {
0698 u32 val, mask;
0699 int prxq;
0700
0701
0702 prxq = port->rxqs[lrxq]->id;
0703
0704 if (port->priv->hw_version == MVPP21)
0705 mask = MVPP21_RXQ_POOL_LONG_MASK;
0706 else
0707 mask = MVPP22_RXQ_POOL_LONG_MASK;
0708
0709 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
0710 val &= ~mask;
0711 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
0712 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
0713 }
0714
0715
0716 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
0717 int lrxq, int short_pool)
0718 {
0719 u32 val, mask;
0720 int prxq;
0721
0722
0723 prxq = port->rxqs[lrxq]->id;
0724
0725 if (port->priv->hw_version == MVPP21)
0726 mask = MVPP21_RXQ_POOL_SHORT_MASK;
0727 else
0728 mask = MVPP22_RXQ_POOL_SHORT_MASK;
0729
0730 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
0731 val &= ~mask;
0732 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
0733 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
0734 }
0735
0736 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
0737 struct mvpp2_bm_pool *bm_pool,
0738 struct page_pool *page_pool,
0739 dma_addr_t *buf_dma_addr,
0740 phys_addr_t *buf_phys_addr,
0741 gfp_t gfp_mask)
0742 {
0743 dma_addr_t dma_addr;
0744 struct page *page;
0745 void *data;
0746
0747 data = mvpp2_frag_alloc(bm_pool, page_pool);
0748 if (!data)
0749 return NULL;
0750
0751 if (page_pool) {
0752 page = (struct page *)data;
0753 dma_addr = page_pool_get_dma_addr(page);
0754 data = page_to_virt(page);
0755 } else {
0756 dma_addr = dma_map_single(port->dev->dev.parent, data,
0757 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
0758 DMA_FROM_DEVICE);
0759 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
0760 mvpp2_frag_free(bm_pool, NULL, data);
0761 return NULL;
0762 }
0763 }
0764 *buf_dma_addr = dma_addr;
0765 *buf_phys_addr = virt_to_phys(data);
0766
0767 return data;
0768 }
0769
0770
0771 static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
0772 {
0773 int val, cm3_state, host_id, q;
0774 int fq = port->first_rxq;
0775 unsigned long flags;
0776
0777 spin_lock_irqsave(&port->priv->mss_spinlock, flags);
0778
0779
0780
0781
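/* Clear the flow-control enable bit while the per-queue settings are
 * updated; the previous state is restored once the update is done.
 */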
0782 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
0783 cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
0784 val &= ~FLOW_CONTROL_ENABLE_BIT;
0785 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
0786
0787
0788 for (q = 0; q < port->nrxqs; q++) {
0789
0790 val = MSS_THRESHOLD_START;
0791 val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
0792 mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
0793
0794 val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
0795
0796 val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
0797 val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
0798 val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
0799 + MSS_RXQ_ASS_HOSTID_OFFS));
0800
0801
0802
0803
0804
0805
0806
0807
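/* Select the host (CPU) ID used for flow control on this queue: in
 * single-queue mode use the ID of the shared RX interrupt, in
 * multi-queue mode use the queue number, otherwise use 0.
 */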
0808 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
0809 host_id = port->nqvecs;
0810 else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
0811 host_id = q;
0812 else
0813 host_id = 0;
0814
0815
0816 val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
0817 + MSS_RXQ_ASS_HOSTID_OFFS));
0818
0819 mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
0820 }
0821
0822
0823 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
0824 val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
0825 val |= cm3_state;
0826 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
0827
0828 spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
0829 }
0830
0831
0832 static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
0833 {
0834 int val, cm3_state, q;
0835 unsigned long flags;
0836 int fq = port->first_rxq;
0837
0838 spin_lock_irqsave(&port->priv->mss_spinlock, flags);
0839
0840
0841
0842
0843 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
0844 cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
0845 val &= ~FLOW_CONTROL_ENABLE_BIT;
0846 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
0847
0848
0849 for (q = 0; q < port->nrxqs; q++) {
0850
0851 val = 0;
0852 val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
0853 mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
0854
0855 val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
0856
0857 val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
0858
0859 val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
0860 + MSS_RXQ_ASS_HOSTID_OFFS));
0861
0862 mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
0863 }
0864
0865
0866 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
0867 val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
0868 val |= cm3_state;
0869 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
0870
0871 spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
0872 }
0873
0874
0875 static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
0876 struct mvpp2_bm_pool *pool,
0877 bool en)
0878 {
0879 int val, cm3_state;
0880 unsigned long flags;
0881
0882 spin_lock_irqsave(&port->priv->mss_spinlock, flags);
0883
0884
0885
0886
0887 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
0888 cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
0889 val &= ~FLOW_CONTROL_ENABLE_BIT;
0890 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
0891
0892
0893 if (en) {
0894
0895 val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
0896 val |= MSS_BUF_POOL_PORT_OFFS(port->id);
0897 val &= ~MSS_BUF_POOL_START_MASK;
0898 val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
0899 val &= ~MSS_BUF_POOL_STOP_MASK;
0900 val |= MSS_THRESHOLD_STOP;
0901 mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
0902 } else {
0903
0904 val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
0905 val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
0906
0907
0908
0909
0910 if (!pool->buf_num) {
0911 val &= ~MSS_BUF_POOL_START_MASK;
0912 val &= ~MSS_BUF_POOL_STOP_MASK;
0913 }
0914
0915 mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
0916 }
0917
0918
0919 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
0920 val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
0921 val |= cm3_state;
0922 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
0923
0924 spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
0925 }
0926
0927
0928 static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
0929 {
0930 struct mvpp2_port *port;
0931 int i, j;
0932
0933 for (i = 0; i < priv->port_count; i++) {
0934 port = priv->port_list[i];
0935 if (port->priv->percpu_pools) {
0936 for (j = 0; j < port->nrxqs; j++)
0937 mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
0938 port->tx_fc & en);
0939 } else {
0940 mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
0941 mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
0942 }
0943 }
0944 }
0945
0946 static int mvpp2_enable_global_fc(struct mvpp2 *priv)
0947 {
0948 int val, timeout = 0;
0949
0950
0951
0952
0953 val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
0954 val |= FLOW_CONTROL_ENABLE_BIT;
0955 mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
0956
0957
0958 val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
0959 mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
0960
0961 while (timeout < MSS_FC_MAX_TIMEOUT) {
0962 val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
0963
0964 if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
0965 return 0;
0966 usleep_range(10, 20);
0967 timeout++;
0968 }
0969
0970 priv->global_tx_fc = false;
0971 return -EOPNOTSUPP;
0972 }
0973
0974
0975 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
0976 dma_addr_t buf_dma_addr,
0977 phys_addr_t buf_phys_addr)
0978 {
0979 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
0980 unsigned long flags = 0;
0981
0982 if (test_bit(thread, &port->priv->lock_map))
0983 spin_lock_irqsave(&port->bm_lock[thread], flags);
0984
0985 if (port->priv->hw_version >= MVPP22) {
0986 u32 val = 0;
0987
0988 if (sizeof(dma_addr_t) == 8)
0989 val |= upper_32_bits(buf_dma_addr) &
0990 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
0991
0992 if (sizeof(phys_addr_t) == 8)
0993 val |= (upper_32_bits(buf_phys_addr)
0994 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
0995 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
0996
0997 mvpp2_thread_write_relaxed(port->priv, thread,
0998 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
0999 }
1000
1001
1002
1003
1004
1005
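/* MVPP2_BM_VIRT_RLS_REG is not interpreted by the hardware, it is simply
 * returned in the cookie field of the RX descriptor, so the buffer
 * physical address is stored there instead of the virtual address.
 */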
1006 mvpp2_thread_write_relaxed(port->priv, thread,
1007 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
1008 mvpp2_thread_write_relaxed(port->priv, thread,
1009 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
1010
1011 if (test_bit(thread, &port->priv->lock_map))
1012 spin_unlock_irqrestore(&port->bm_lock[thread], flags);
1013
1014 put_cpu();
1015 }
1016
1017
1018 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
1019 struct mvpp2_bm_pool *bm_pool, int buf_num)
1020 {
1021 int i, buf_size, total_size;
1022 dma_addr_t dma_addr;
1023 phys_addr_t phys_addr;
1024 struct page_pool *pp = NULL;
1025 void *buf;
1026
1027 if (port->priv->percpu_pools &&
1028 bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
1029 netdev_err(port->dev,
1030 "attempted to use jumbo frames with per-cpu pools");
1031 return 0;
1032 }
1033
1034 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
1035 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
1036
1037 if (buf_num < 0 ||
1038 (buf_num + bm_pool->buf_num > bm_pool->size)) {
1039 netdev_err(port->dev,
1040 "cannot allocate %d buffers for pool %d\n",
1041 buf_num, bm_pool->id);
1042 return 0;
1043 }
1044
1045 if (port->priv->percpu_pools)
1046 pp = port->priv->page_pool[bm_pool->id];
1047 for (i = 0; i < buf_num; i++) {
1048 buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
1049 &phys_addr, GFP_KERNEL);
1050 if (!buf)
1051 break;
1052
1053 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
1054 phys_addr);
1055 }
1056
1057
1058 bm_pool->buf_num += i;
1059
1060 netdev_dbg(port->dev,
1061 "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
1062 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
1063
1064 netdev_dbg(port->dev,
1065 "pool %d: %d of %d buffers added\n",
1066 bm_pool->id, i, buf_num);
1067 return i;
1068 }
1069
1070
1071
1072
1073 static struct mvpp2_bm_pool *
1074 mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
1075 {
1076 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
1077 int num;
1078
1079 if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
1080 (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
1081 netdev_err(port->dev, "Invalid pool %d\n", pool);
1082 return NULL;
1083 }
1084
1085
1086
1087
1088 if (new_pool->pkt_size == 0) {
1089 int pkts_num;
1090
1091
1092
1093
1094 pkts_num = new_pool->buf_num;
1095 if (pkts_num == 0) {
1096 if (port->priv->percpu_pools) {
1097 if (pool < port->nrxqs)
1098 pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
1099 else
1100 pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
1101 } else {
1102 pkts_num = mvpp2_pools[pool].buf_num;
1103 }
1104 } else {
1105 mvpp2_bm_bufs_free(port->dev->dev.parent,
1106 port->priv, new_pool, pkts_num);
1107 }
1108
1109 new_pool->pkt_size = pkt_size;
1110 new_pool->frag_size =
1111 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
1112 MVPP2_SKB_SHINFO_SIZE;
1113
1114
1115 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
1116 if (num != pkts_num) {
1117 WARN(1, "pool %d: %d of %d allocated\n",
1118 new_pool->id, num, pkts_num);
1119 return NULL;
1120 }
1121 }
1122
1123 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
1124 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
1125
1126 return new_pool;
1127 }
1128
1129 static struct mvpp2_bm_pool *
1130 mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
1131 unsigned int pool, int pkt_size)
1132 {
1133 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
1134 int num;
1135
1136 if (pool > port->nrxqs * 2) {
1137 netdev_err(port->dev, "Invalid pool %d\n", pool);
1138 return NULL;
1139 }
1140
1141
1142
1143
1144 if (new_pool->pkt_size == 0) {
1145 int pkts_num;
1146
1147
1148
1149
1150 pkts_num = new_pool->buf_num;
1151 if (pkts_num == 0)
1152 pkts_num = mvpp2_pools[type].buf_num;
1153 else
1154 mvpp2_bm_bufs_free(port->dev->dev.parent,
1155 port->priv, new_pool, pkts_num);
1156
1157 new_pool->pkt_size = pkt_size;
1158 new_pool->frag_size =
1159 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
1160 MVPP2_SKB_SHINFO_SIZE;
1161
1162
1163 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
1164 if (num != pkts_num) {
1165 WARN(1, "pool %d: %d of %d allocated\n",
1166 new_pool->id, num, pkts_num);
1167 return NULL;
1168 }
1169 }
1170
1171 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
1172 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
1173
1174 return new_pool;
1175 }
1176
1177
1178 static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
1179 {
1180 enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
1181 int rxq;
1182
1183
1184
1185
1186
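/* If the port MTU exceeds the long-pool packet size, use the jumbo pool
 * as the hardware long pool and the long pool as the hardware short pool.
 */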
1187 if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
1188 long_log_pool = MVPP2_BM_JUMBO;
1189 short_log_pool = MVPP2_BM_LONG;
1190 } else {
1191 long_log_pool = MVPP2_BM_LONG;
1192 short_log_pool = MVPP2_BM_SHORT;
1193 }
1194
1195 if (!port->pool_long) {
1196 port->pool_long =
1197 mvpp2_bm_pool_use(port, long_log_pool,
1198 mvpp2_pools[long_log_pool].pkt_size);
1199 if (!port->pool_long)
1200 return -ENOMEM;
1201
1202 port->pool_long->port_map |= BIT(port->id);
1203
1204 for (rxq = 0; rxq < port->nrxqs; rxq++)
1205 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
1206 }
1207
1208 if (!port->pool_short) {
1209 port->pool_short =
1210 mvpp2_bm_pool_use(port, short_log_pool,
1211 mvpp2_pools[short_log_pool].pkt_size);
1212 if (!port->pool_short)
1213 return -ENOMEM;
1214
1215 port->pool_short->port_map |= BIT(port->id);
1216
1217 for (rxq = 0; rxq < port->nrxqs; rxq++)
1218 mvpp2_rxq_short_pool_set(port, rxq,
1219 port->pool_short->id);
1220 }
1221
1222 return 0;
1223 }
1224
1225
1226 static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
1227 {
1228 struct mvpp2_bm_pool *bm_pool;
1229 int i;
1230
1231 for (i = 0; i < port->nrxqs; i++) {
1232 bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
1233 mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
1234 if (!bm_pool)
1235 return -ENOMEM;
1236
1237 bm_pool->port_map |= BIT(port->id);
1238 mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
1239 }
1240
1241 for (i = 0; i < port->nrxqs; i++) {
1242 bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
1243 mvpp2_pools[MVPP2_BM_LONG].pkt_size);
1244 if (!bm_pool)
1245 return -ENOMEM;
1246
1247 bm_pool->port_map |= BIT(port->id);
1248 mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
1249 }
1250
1251 port->pool_long = NULL;
1252 port->pool_short = NULL;
1253
1254 return 0;
1255 }
1256
1257 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
1258 {
1259 if (port->priv->percpu_pools)
1260 return mvpp2_swf_bm_pool_init_percpu(port);
1261 else
1262 return mvpp2_swf_bm_pool_init_shared(port);
1263 }
1264
1265 static void mvpp2_set_hw_csum(struct mvpp2_port *port,
1266 enum mvpp2_bm_pool_log_num new_long_pool)
1267 {
1268 const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1269
1270
1271
1272
1273
1274
1275
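/* When the jumbo pool is selected, only port 0 keeps L3/L4 checksum
 * offload; it is disabled on the other ports.
 */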
1276 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
1277 port->dev->features &= ~csums;
1278 port->dev->hw_features &= ~csums;
1279 } else {
1280 port->dev->features |= csums;
1281 port->dev->hw_features |= csums;
1282 }
1283 }
1284
1285 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
1286 {
1287 struct mvpp2_port *port = netdev_priv(dev);
1288 enum mvpp2_bm_pool_log_num new_long_pool;
1289 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
1290
1291 if (port->priv->percpu_pools)
1292 goto out_set;
1293
1294
1295
1296
1297
1298 if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
1299 new_long_pool = MVPP2_BM_JUMBO;
1300 else
1301 new_long_pool = MVPP2_BM_LONG;
1302
1303 if (new_long_pool != port->pool_long->id) {
1304 if (port->tx_fc) {
1305 if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
1306 mvpp2_bm_pool_update_fc(port,
1307 port->pool_short,
1308 false);
1309 else
1310 mvpp2_bm_pool_update_fc(port, port->pool_long,
1311 false);
1312 }
1313
1314
1315 port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
1316 port->pool_long->pkt_size);
1317 port->pool_long->port_map &= ~BIT(port->id);
1318 port->pool_long = NULL;
1319
1320 port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
1321 port->pool_short->pkt_size);
1322 port->pool_short->port_map &= ~BIT(port->id);
1323 port->pool_short = NULL;
1324
1325 port->pkt_size = pkt_size;
1326
1327
1328 mvpp2_swf_bm_pool_init(port);
1329
1330 mvpp2_set_hw_csum(port, new_long_pool);
1331
1332 if (port->tx_fc) {
1333 if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
1334 mvpp2_bm_pool_update_fc(port, port->pool_long,
1335 true);
1336 else
1337 mvpp2_bm_pool_update_fc(port, port->pool_short,
1338 true);
1339 }
1340
1341
1342 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
1343 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
1344 dev->hw_features &= ~(NETIF_F_IP_CSUM |
1345 NETIF_F_IPV6_CSUM);
1346 } else {
1347 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1348 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1349 }
1350 }
1351
1352 out_set:
1353 dev->mtu = mtu;
1354 dev->wanted_features = dev->features;
1355
1356 netdev_update_features(dev);
1357 return 0;
1358 }
1359
1360 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
1361 {
1362 int i, sw_thread_mask = 0;
1363
1364 for (i = 0; i < port->nqvecs; i++)
1365 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
1366
1367 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1368 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
1369 }
1370
1371 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
1372 {
1373 int i, sw_thread_mask = 0;
1374
1375 for (i = 0; i < port->nqvecs; i++)
1376 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
1377
1378 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1379 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
1380 }
1381
1382 static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
1383 {
1384 struct mvpp2_port *port = qvec->port;
1385
1386 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1387 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
1388 }
1389
1390 static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
1391 {
1392 struct mvpp2_port *port = qvec->port;
1393
1394 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1395 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
1396 }
1397
1398
1399
1400
1401
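/* Per-CPU callback: mask the Rx/Tx interrupts of this port for the
 * current CPU's software thread.
 */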
1402 static void mvpp2_interrupts_mask(void *arg)
1403 {
1404 struct mvpp2_port *port = arg;
1405 int cpu = smp_processor_id();
1406 u32 thread;
1407
1408
1409 if (cpu >= port->priv->nthreads)
1410 return;
1411
1412 thread = mvpp2_cpu_to_thread(port->priv, cpu);
1413
1414 mvpp2_thread_write(port->priv, thread,
1415 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
1416 mvpp2_thread_write(port->priv, thread,
1417 MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
1418 }
1419
1420
1421
1422
1423
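/* Per-CPU callback: unmask the Rx (and, when TX IRQs are used, Tx-done)
 * interrupts of this port for the current CPU's software thread.
 */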
1424 static void mvpp2_interrupts_unmask(void *arg)
1425 {
1426 struct mvpp2_port *port = arg;
1427 int cpu = smp_processor_id();
1428 u32 val, thread;
1429
1430
1431 if (cpu >= port->priv->nthreads)
1432 return;
1433
1434 thread = mvpp2_cpu_to_thread(port->priv, cpu);
1435
1436 val = MVPP2_CAUSE_MISC_SUM_MASK |
1437 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
1438 if (port->has_tx_irqs)
1439 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
1440
1441 mvpp2_thread_write(port->priv, thread,
1442 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
1443 mvpp2_thread_write(port->priv, thread,
1444 MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
1445 MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
1446 }
1447
1448 static void
1449 mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
1450 {
1451 u32 val;
1452 int i;
1453
1454 if (port->priv->hw_version == MVPP21)
1455 return;
1456
1457 if (mask)
1458 val = 0;
1459 else
1460 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
1461
1462 for (i = 0; i < port->nqvecs; i++) {
1463 struct mvpp2_queue_vector *v = port->qvecs + i;
1464
1465 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
1466 continue;
1467
1468 mvpp2_thread_write(port->priv, v->sw_thread_id,
1469 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
1470 mvpp2_thread_write(port->priv, v->sw_thread_id,
1471 MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
1472 MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
1473 }
1474 }
1475
1476
1477 static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
1478 {
1479 return port->gop_id == 0;
1480 }
1481
1482 static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
1483 {
1484 return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
1485 }
1486
1487
1488 static bool mvpp2_is_xlg(phy_interface_t interface)
1489 {
1490 return interface == PHY_INTERFACE_MODE_10GBASER ||
1491 interface == PHY_INTERFACE_MODE_5GBASER ||
1492 interface == PHY_INTERFACE_MODE_XAUI;
1493 }
1494
1495 static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
1496 {
1497 u32 old, val;
1498
1499 old = val = readl(ptr);
1500 val &= ~mask;
1501 val |= set;
1502 if (old != val)
1503 writel(val, ptr);
1504 }
1505
1506 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
1507 {
1508 struct mvpp2 *priv = port->priv;
1509 u32 val;
1510
1511 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1512 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
1513 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1514
1515 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
1516 if (port->gop_id == 2)
1517 val |= GENCONF_CTRL0_PORT2_RGMII;
1518 else if (port->gop_id == 3)
1519 val |= GENCONF_CTRL0_PORT3_RGMII_MII;
1520 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
1521 }
1522
1523 static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
1524 {
1525 struct mvpp2 *priv = port->priv;
1526 u32 val;
1527
1528 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1529 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
1530 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
1531 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1532
1533 if (port->gop_id > 1) {
1534 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
1535 if (port->gop_id == 2)
1536 val &= ~GENCONF_CTRL0_PORT2_RGMII;
1537 else if (port->gop_id == 3)
1538 val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
1539 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
1540 }
1541 }
1542
1543 static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
1544 {
1545 struct mvpp2 *priv = port->priv;
1546 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
1547 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
1548 u32 val;
1549
1550 val = readl(xpcs + MVPP22_XPCS_CFG0);
1551 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
1552 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
1553 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
1554 writel(val, xpcs + MVPP22_XPCS_CFG0);
1555
1556 val = readl(mpcs + MVPP22_MPCS_CTRL);
1557 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
1558 writel(val, mpcs + MVPP22_MPCS_CTRL);
1559
1560 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
1561 val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
1562 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
1563 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
1564 }
1565
1566 static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
1567 {
1568 struct mvpp2 *priv = port->priv;
1569 void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
1570 u32 val;
1571
1572 val = readl(fca + MVPP22_FCA_CONTROL_REG);
1573 val &= ~MVPP22_FCA_ENABLE_PERIODIC;
1574 if (en)
1575 val |= MVPP22_FCA_ENABLE_PERIODIC;
1576 writel(val, fca + MVPP22_FCA_CONTROL_REG);
1577 }
1578
1579 static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
1580 {
1581 struct mvpp2 *priv = port->priv;
1582 void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
1583 u32 lsb, msb;
1584
1585 lsb = timer & MVPP22_FCA_REG_MASK;
1586 msb = timer >> MVPP22_FCA_REG_SIZE;
1587
1588 writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
1589 writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
1590 }
1591
1592
1593
1594
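/* Derive the FCA periodic timer from the port clock, the flow-control
 * clock divider and the pause quanta.
 */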
1595 static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
1596 {
1597 u32 timer;
1598
1599 timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
1600 * FC_QUANTA;
1601
1602 mvpp22_gop_fca_enable_periodic(port, false);
1603
1604 mvpp22_gop_fca_set_timer(port, timer);
1605
1606 mvpp22_gop_fca_enable_periodic(port, true);
1607 }
1608
1609 static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
1610 {
1611 struct mvpp2 *priv = port->priv;
1612 u32 val;
1613
1614 if (!priv->sysctrl_base)
1615 return 0;
1616
1617 switch (interface) {
1618 case PHY_INTERFACE_MODE_RGMII:
1619 case PHY_INTERFACE_MODE_RGMII_ID:
1620 case PHY_INTERFACE_MODE_RGMII_RXID:
1621 case PHY_INTERFACE_MODE_RGMII_TXID:
1622 if (!mvpp2_port_supports_rgmii(port))
1623 goto invalid_conf;
1624 mvpp22_gop_init_rgmii(port);
1625 break;
1626 case PHY_INTERFACE_MODE_SGMII:
1627 case PHY_INTERFACE_MODE_1000BASEX:
1628 case PHY_INTERFACE_MODE_2500BASEX:
1629 mvpp22_gop_init_sgmii(port);
1630 break;
1631 case PHY_INTERFACE_MODE_5GBASER:
1632 case PHY_INTERFACE_MODE_10GBASER:
1633 if (!mvpp2_port_supports_xlg(port))
1634 goto invalid_conf;
1635 mvpp22_gop_init_10gkr(port);
1636 break;
1637 default:
1638 goto unsupported_conf;
1639 }
1640
1641 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
1642 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
1643 GENCONF_PORT_CTRL1_EN(port->gop_id);
1644 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
1645
1646 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1647 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
1648 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1649
1650 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
1651 val |= GENCONF_SOFT_RESET1_GOP;
1652 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
1653
1654 mvpp22_gop_fca_set_periodic_timer(port);
1655
1656 unsupported_conf:
1657 return 0;
1658
1659 invalid_conf:
1660 netdev_err(port->dev, "Invalid port configuration\n");
1661 return -EINVAL;
1662 }
1663
1664 static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
1665 {
1666 u32 val;
1667
1668 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
1669 phy_interface_mode_is_8023z(port->phy_interface) ||
1670 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1671
1672 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1673 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1674 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1675 }
1676
1677 if (mvpp2_port_supports_xlg(port)) {
1678
1679 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1680 if (mvpp2_is_xlg(port->phy_interface))
1681 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
1682 else
1683 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
1684 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1685 }
1686 }
1687
1688 static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
1689 {
1690 u32 val;
1691
1692 if (mvpp2_port_supports_xlg(port)) {
1693 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1694 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
1695 MVPP22_XLG_EXT_INT_MASK_GIG);
1696 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1697 }
1698
1699 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
1700 phy_interface_mode_is_8023z(port->phy_interface) ||
1701 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1702 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1703 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1704 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1705 }
1706 }
1707
1708 static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
1709 {
1710 u32 val;
1711
1712 mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
1713 MVPP22_GMAC_INT_SUM_MASK_PTP,
1714 MVPP22_GMAC_INT_SUM_MASK_PTP);
1715
1716 if (port->phylink ||
1717 phy_interface_mode_is_rgmii(port->phy_interface) ||
1718 phy_interface_mode_is_8023z(port->phy_interface) ||
1719 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1720 val = readl(port->base + MVPP22_GMAC_INT_MASK);
1721 val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
1722 writel(val, port->base + MVPP22_GMAC_INT_MASK);
1723 }
1724
1725 if (mvpp2_port_supports_xlg(port)) {
1726 val = readl(port->base + MVPP22_XLG_INT_MASK);
1727 val |= MVPP22_XLG_INT_MASK_LINK;
1728 writel(val, port->base + MVPP22_XLG_INT_MASK);
1729
1730 mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
1731 MVPP22_XLG_EXT_INT_MASK_PTP,
1732 MVPP22_XLG_EXT_INT_MASK_PTP);
1733 }
1734
1735 mvpp22_gop_unmask_irq(port);
1736 }
1747
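/* Set the COMPHY (serdes lane) mode for the given interface and power
 * it on.
 */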
1748 static int mvpp22_comphy_init(struct mvpp2_port *port,
1749 phy_interface_t interface)
1750 {
1751 int ret;
1752
1753 if (!port->comphy)
1754 return 0;
1755
1756 ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);
1757 if (ret)
1758 return ret;
1759
1760 return phy_power_on(port->comphy);
1761 }
1762
1763 static void mvpp2_port_enable(struct mvpp2_port *port)
1764 {
1765 u32 val;
1766
1767 if (mvpp2_port_supports_xlg(port) &&
1768 mvpp2_is_xlg(port->phy_interface)) {
1769 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1770 val |= MVPP22_XLG_CTRL0_PORT_EN;
1771 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
1772 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1773 } else {
1774 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1775 val |= MVPP2_GMAC_PORT_EN_MASK;
1776 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
1777 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1778 }
1779 }
1780
1781 static void mvpp2_port_disable(struct mvpp2_port *port)
1782 {
1783 u32 val;
1784
1785 if (mvpp2_port_supports_xlg(port) &&
1786 mvpp2_is_xlg(port->phy_interface)) {
1787 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1788 val &= ~MVPP22_XLG_CTRL0_PORT_EN;
1789 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1790 }
1791
1792 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1793 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
1794 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1795 }
1796
1797
1798 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
1799 {
1800 u32 val;
1801
1802 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
1803 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
1804 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1805 }
1806
1807
1808 static void mvpp2_port_loopback_set(struct mvpp2_port *port,
1809 const struct phylink_link_state *state)
1810 {
1811 u32 val;
1812
1813 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
1814
1815 if (state->speed == 1000)
1816 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
1817 else
1818 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
1819
1820 if (phy_interface_mode_is_8023z(state->interface) ||
1821 state->interface == PHY_INTERFACE_MODE_SGMII)
1822 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
1823 else
1824 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
1825
1826 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1827 }
1828
1829 enum {
1830 ETHTOOL_XDP_REDIRECT,
1831 ETHTOOL_XDP_PASS,
1832 ETHTOOL_XDP_DROP,
1833 ETHTOOL_XDP_TX,
1834 ETHTOOL_XDP_TX_ERR,
1835 ETHTOOL_XDP_XMIT,
1836 ETHTOOL_XDP_XMIT_ERR,
1837 };
1838
1839 struct mvpp2_ethtool_counter {
1840 unsigned int offset;
1841 const char string[ETH_GSTRING_LEN];
1842 bool reg_is_64b;
1843 };
1844
1845 static u64 mvpp2_read_count(struct mvpp2_port *port,
1846 const struct mvpp2_ethtool_counter *counter)
1847 {
1848 u64 val;
1849
1850 val = readl(port->stats_base + counter->offset);
1851 if (counter->reg_is_64b)
1852 val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
1853
1854 return val;
1855 }
1856
1857
1858
1859
1860
1861
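/* Some counters are read indirectly: write the counter index to
 * MVPP2_CTRS_IDX, then read the value from the counter register.
 */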
1862 static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
1863 {
1864 mvpp2_write(priv, MVPP2_CTRS_IDX, index);
1865 return mvpp2_read(priv, reg);
1866 }
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
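/* Hardware MIB counters, accumulated into ethtool_stats on each readout */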
1877 static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
1878 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
1879 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
1880 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
1881 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
1882 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
1883 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
1884 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
1885 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
1886 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
1887 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
1888 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
1889 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
1890 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
1891 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
1892 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
1893 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
1894 { MVPP2_MIB_FC_SENT, "fc_sent" },
1895 { MVPP2_MIB_FC_RCVD, "fc_received" },
1896 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
1897 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
1898 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
1899 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
1900 { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
1901 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
1902 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
1903 { MVPP2_MIB_COLLISION, "collision" },
1904 { MVPP2_MIB_LATE_COLLISION, "late_collision" },
1905 };
1906
1907 static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
1908 { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
1909 { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
1910 };
1911
1912 static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
1913 { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
1914 { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
1915 { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
1916 { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
1917 { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
1918 { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
1919 { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
1920 { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
1921 { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
1922 };
1923
1924 static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
1925 { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
1926 { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
1927 { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
1928 { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
1929 };
1930
1931 static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
1932 { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
1933 { ETHTOOL_XDP_PASS, "rx_xdp_pass", },
1934 { ETHTOOL_XDP_DROP, "rx_xdp_drop", },
1935 { ETHTOOL_XDP_TX, "rx_xdp_tx", },
1936 { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
1937 { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
1938 { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
1939 };
1940
1941 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
1942 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
1943 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
1944 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
1945 ARRAY_SIZE(mvpp2_ethtool_xdp))
1946
1947 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
1948 u8 *data)
1949 {
1950 struct mvpp2_port *port = netdev_priv(netdev);
1951 int i, q;
1952
1953 if (sset != ETH_SS_STATS)
1954 return;
1955
1956 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
1957 strscpy(data, mvpp2_ethtool_mib_regs[i].string,
1958 ETH_GSTRING_LEN);
1959 data += ETH_GSTRING_LEN;
1960 }
1961
1962 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
1963 strscpy(data, mvpp2_ethtool_port_regs[i].string,
1964 ETH_GSTRING_LEN);
1965 data += ETH_GSTRING_LEN;
1966 }
1967
1968 for (q = 0; q < port->ntxqs; q++) {
1969 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
1970 snprintf(data, ETH_GSTRING_LEN,
1971 mvpp2_ethtool_txq_regs[i].string, q);
1972 data += ETH_GSTRING_LEN;
1973 }
1974 }
1975
1976 for (q = 0; q < port->nrxqs; q++) {
1977 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
1978 snprintf(data, ETH_GSTRING_LEN,
1979 mvpp2_ethtool_rxq_regs[i].string,
1980 q);
1981 data += ETH_GSTRING_LEN;
1982 }
1983 }
1984
1985 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
1986 strscpy(data, mvpp2_ethtool_xdp[i].string,
1987 ETH_GSTRING_LEN);
1988 data += ETH_GSTRING_LEN;
1989 }
1990 }
1991
1992 static void
1993 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
1994 {
1995 unsigned int start;
1996 unsigned int cpu;
1997
1998
1999 for_each_possible_cpu(cpu) {
2000 struct mvpp2_pcpu_stats *cpu_stats;
2001 u64 xdp_redirect;
2002 u64 xdp_pass;
2003 u64 xdp_drop;
2004 u64 xdp_xmit;
2005 u64 xdp_xmit_err;
2006 u64 xdp_tx;
2007 u64 xdp_tx_err;
2008
2009 cpu_stats = per_cpu_ptr(port->stats, cpu);
2010 do {
2011 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2012 xdp_redirect = cpu_stats->xdp_redirect;
2013 xdp_pass = cpu_stats->xdp_pass;
2014 xdp_drop = cpu_stats->xdp_drop;
2015 xdp_xmit = cpu_stats->xdp_xmit;
2016 xdp_xmit_err = cpu_stats->xdp_xmit_err;
2017 xdp_tx = cpu_stats->xdp_tx;
2018 xdp_tx_err = cpu_stats->xdp_tx_err;
2019 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2020
2021 xdp_stats->xdp_redirect += xdp_redirect;
2022 xdp_stats->xdp_pass += xdp_pass;
2023 xdp_stats->xdp_drop += xdp_drop;
2024 xdp_stats->xdp_xmit += xdp_xmit;
2025 xdp_stats->xdp_xmit_err += xdp_xmit_err;
2026 xdp_stats->xdp_tx += xdp_tx;
2027 xdp_stats->xdp_tx_err += xdp_tx_err;
2028 }
2029 }
2030
2031 static void mvpp2_read_stats(struct mvpp2_port *port)
2032 {
2033 struct mvpp2_pcpu_stats xdp_stats = {};
2034 const struct mvpp2_ethtool_counter *s;
2035 u64 *pstats;
2036 int i, q;
2037
2038 pstats = port->ethtool_stats;
2039
2040 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
2041 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
2042
2043 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
2044 *pstats++ += mvpp2_read(port->priv,
2045 mvpp2_ethtool_port_regs[i].offset +
2046 4 * port->id);
2047
2048 for (q = 0; q < port->ntxqs; q++)
2049 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
2050 *pstats++ += mvpp2_read_index(port->priv,
2051 MVPP22_CTRS_TX_CTR(port->id, q),
2052 mvpp2_ethtool_txq_regs[i].offset);
2053
2054
2055
2056
2057 for (q = 0; q < port->nrxqs; q++)
2058 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
2059 *pstats++ += mvpp2_read_index(port->priv,
2060 port->first_rxq + q,
2061 mvpp2_ethtool_rxq_regs[i].offset);
2062
2063
2064 mvpp2_get_xdp_stats(port, &xdp_stats);
2065
2066 for (i = 0, s = mvpp2_ethtool_xdp;
2067 s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
2068 s++, i++) {
2069 switch (s->offset) {
2070 case ETHTOOL_XDP_REDIRECT:
2071 *pstats++ = xdp_stats.xdp_redirect;
2072 break;
2073 case ETHTOOL_XDP_PASS:
2074 *pstats++ = xdp_stats.xdp_pass;
2075 break;
2076 case ETHTOOL_XDP_DROP:
2077 *pstats++ = xdp_stats.xdp_drop;
2078 break;
2079 case ETHTOOL_XDP_TX:
2080 *pstats++ = xdp_stats.xdp_tx;
2081 break;
2082 case ETHTOOL_XDP_TX_ERR:
2083 *pstats++ = xdp_stats.xdp_tx_err;
2084 break;
2085 case ETHTOOL_XDP_XMIT:
2086 *pstats++ = xdp_stats.xdp_xmit;
2087 break;
2088 case ETHTOOL_XDP_XMIT_ERR:
2089 *pstats++ = xdp_stats.xdp_xmit_err;
2090 break;
2091 }
2092 }
2093 }
2094
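/* Delayed work that periodically refreshes the HW statistics under gather_stats_lock */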
2095 static void mvpp2_gather_hw_statistics(struct work_struct *work)
2096 {
2097 struct delayed_work *del_work = to_delayed_work(work);
2098 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
2099 stats_work);
2100
2101 mutex_lock(&port->gather_stats_lock);
2102
2103 mvpp2_read_stats(port);
2104
2105
2106
2107
2108 cancel_delayed_work(&port->stats_work);
2109 queue_delayed_work(port->priv->stats_queue, &port->stats_work,
2110 MVPP2_MIB_COUNTERS_STATS_DELAY);
2111
2112 mutex_unlock(&port->gather_stats_lock);
2113 }
2114
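/* ethtool .get_ethtool_stats: force a statistics update, then copy the counters out under the lock */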
2115 static void mvpp2_ethtool_get_stats(struct net_device *dev,
2116 struct ethtool_stats *stats, u64 *data)
2117 {
2118 struct mvpp2_port *port = netdev_priv(dev);
2119
2120
2121
2122
2123 mvpp2_gather_hw_statistics(&port->stats_work.work);
2124
2125 mutex_lock(&port->gather_stats_lock);
2126 memcpy(data, port->ethtool_stats,
2127 sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
2128 mutex_unlock(&port->gather_stats_lock);
2129 }
2130
2131 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
2132 {
2133 struct mvpp2_port *port = netdev_priv(dev);
2134
2135 if (sset == ETH_SS_STATS)
2136 return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
2137
2138 return -EOPNOTSUPP;
2139 }
2140
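/* Put the GMAC (and, for port 0 on PPv2.2 and later, the XLG MAC) in reset */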
2141 static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
2142 {
2143 u32 val;
2144
2145 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
2146 MVPP2_GMAC_PORT_RESET_MASK;
2147 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2148
2149 if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
2150 val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
2151 ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
2152 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
2153 }
2154 }
2155
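/* Assert the MPCS and XPCS resets (only relevant for port 0 on PPv2.2 and later) */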
2156 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
2157 {
2158 struct mvpp2 *priv = port->priv;
2159 void __iomem *mpcs, *xpcs;
2160 u32 val;
2161
2162 if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2163 return;
2164
2165 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2166 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2167
2168 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2169 val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
2170 val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
2171 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2172
2173 val = readl(xpcs + MVPP22_XPCS_CFG0);
2174 writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2175 }
2176
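/* De-assert the MPCS or XPCS reset, depending on the interface mode */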
2177 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
2178 phy_interface_t interface)
2179 {
2180 struct mvpp2 *priv = port->priv;
2181 void __iomem *mpcs, *xpcs;
2182 u32 val;
2183
2184 if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2185 return;
2186
2187 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2188 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2189
2190 switch (interface) {
2191 case PHY_INTERFACE_MODE_5GBASER:
2192 case PHY_INTERFACE_MODE_10GBASER:
2193 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2194 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
2195 MAC_CLK_RESET_SD_TX;
2196 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
2197 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2198 break;
2199 case PHY_INTERFACE_MODE_XAUI:
2200 case PHY_INTERFACE_MODE_RXAUI:
2201 val = readl(xpcs + MVPP22_XPCS_CFG0);
2202 writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2203 break;
2204 default:
2205 break;
2206 }
2207 }
2208
2209
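/* Change the maximum receive size of the GMAC port */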
2210 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2211 {
2212 u32 val;
2213
2214 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2215 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2216 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2217 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2218 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2219 }
2220
2221
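/* Change the maximum receive size of the XLG MAC port */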
2222 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
2223 {
2224 u32 val;
2225
2226 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
2227 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
2228 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2229 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
2230 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
2231 }
2232
2233
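/* Set default configuration for the MVPP2 port */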
2234 static void mvpp2_defaults_set(struct mvpp2_port *port)
2235 {
2236 int tx_port_num, val, queue, lrxq;
2237
2238 if (port->priv->hw_version == MVPP21) {
2239
2240 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2241 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2242
2243 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2244 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2245 }
2246
2247
2248 tx_port_num = mvpp2_egress_port(port);
2249 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2250 tx_port_num);
2251 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2252
2253
2254 mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
2255
2256
2257 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
2258 mvpp2_write(port->priv,
2259 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
2260
2261
2262
2263
2264 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
2265 port->priv->tclk / USEC_PER_SEC);
2266 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2267 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2268 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2269 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2270 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2271 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2272 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2273
2274
2275 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2276 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2277 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2278
2279
2280 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2281 queue = port->rxqs[lrxq]->id;
2282 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2283 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2284 MVPP2_SNOOP_BUF_HDR_MASK;
2285 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2286 }
2287
2288
2289 mvpp2_interrupts_disable(port);
2290 }
2291
2292
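/* Enable reception on all Rx queues of the port */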
2293 static void mvpp2_ingress_enable(struct mvpp2_port *port)
2294 {
2295 u32 val;
2296 int lrxq, queue;
2297
2298 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2299 queue = port->rxqs[lrxq]->id;
2300 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2301 val &= ~MVPP2_RXQ_DISABLE_MASK;
2302 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2303 }
2304 }
2305
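/* Disable reception on all Rx queues of the port */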
2306 static void mvpp2_ingress_disable(struct mvpp2_port *port)
2307 {
2308 u32 val;
2309 int lrxq, queue;
2310
2311 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2312 queue = port->rxqs[lrxq]->id;
2313 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2314 val |= MVPP2_RXQ_DISABLE_MASK;
2315 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2316 }
2317 }
2318
2319
2320
2321
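/* Enable transmit: activate the egress queues that have descriptor memory allocated */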
2322 static void mvpp2_egress_enable(struct mvpp2_port *port)
2323 {
2324 u32 qmap;
2325 int queue;
2326 int tx_port_num = mvpp2_egress_port(port);
2327
2328
2329 qmap = 0;
2330 for (queue = 0; queue < port->ntxqs; queue++) {
2331 struct mvpp2_tx_queue *txq = port->txqs[queue];
2332
2333 if (txq->descs)
2334 qmap |= (1 << queue);
2335 }
2336
2337 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2338 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2339 }
2340
2341
2342
2343
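/* Disable transmit on the egress queues of the port and wait for pending traffic to drain */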
2344 static void mvpp2_egress_disable(struct mvpp2_port *port)
2345 {
2346 u32 reg_data;
2347 int delay;
2348 int tx_port_num = mvpp2_egress_port(port);
2349
2350
2351 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2352 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2353 MVPP2_TXP_SCHED_ENQ_MASK;
2354 if (reg_data != 0)
2355 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2356 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2357
2358
2359 delay = 0;
2360 do {
2361 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2362 netdev_warn(port->dev,
2363 "Tx stop timed out, status=0x%08x\n",
2364 reg_data);
2365 break;
2366 }
2367 mdelay(1);
2368 delay++;
2369
2370
2371
2372
2373 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2374 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2375 }
2376
2377
2378
2379
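/* Get the number of Rx descriptors occupied by received packets */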
2380 static inline int
2381 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2382 {
2383 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2384
2385 return val & MVPP2_RXQ_OCCUPIED_MASK;
2386 }
2387
2388
2389
2390
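/* Update the Rx queue status with the number of processed and newly available descriptors */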
2391 static inline void
2392 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2393 int used_count, int free_count)
2394 {
2395
2396
2397
2398 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2399
2400 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2401 }
2402
2403
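/* Get the next Rx descriptor to process and prefetch the following one */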
2404 static inline struct mvpp2_rx_desc *
2405 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2406 {
2407 int rx_desc = rxq->next_desc_to_proc;
2408
2409 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2410 prefetch(rxq->descs + rxq->next_desc_to_proc);
2411 return rxq->descs + rx_desc;
2412 }
2413
2414
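/* Set the packet offset (in units of 32 bytes) for buffers of the given physical Rx queue */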
2415 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2416 int prxq, int offset)
2417 {
2418 u32 val;
2419
2420
2421 offset = offset >> 5;
2422
2423 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2424 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2425
2426
2427 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2428 MVPP2_RXQ_PACKET_OFFSET_MASK);
2429
2430 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2431 }
2432
2433
2434
2435
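/* Get the next Tx descriptor to be filled and handed to HW */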
2436 static struct mvpp2_tx_desc *
2437 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2438 {
2439 int tx_desc = txq->next_desc_to_proc;
2440
2441 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2442 return txq->descs + tx_desc;
2443 }
2444
2445
2446
2447
2448
2449
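/* Tell the HW how many aggregated Tx descriptors are pending transmission */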
2450 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2451 {
2452
2453 mvpp2_thread_write(port->priv,
2454 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2455 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2456 }
2457
2458
2459
2460
2461
2462
2463
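/* Check that the aggregated Tx queue has room for num descriptors; re-read the HW occupancy before giving up */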
2464 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2465 struct mvpp2_tx_queue *aggr_txq, int num)
2466 {
2467 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2468
2469 unsigned int thread =
2470 mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2471 u32 val = mvpp2_read_relaxed(port->priv,
2472 MVPP2_AGGR_TXQ_STATUS_REG(thread));
2473
2474 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2475
2476 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2477 return -ENOMEM;
2478 }
2479 return 0;
2480 }
2481
2482
2483
2484
2485
2486
2487
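/* Request a chunk of reserved Tx descriptors from the HW and return how many were granted */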
2488 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
2489 struct mvpp2_tx_queue *txq, int num)
2490 {
2491 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2492 struct mvpp2 *priv = port->priv;
2493 u32 val;
2494
2495 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
2496 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
2497
2498 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
2499
2500 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
2501 }
2502
2503
2504
2505
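/* Make sure enough descriptors are reserved for this thread; request an extra chunk if needed */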
2506 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
2507 struct mvpp2_tx_queue *txq,
2508 struct mvpp2_txq_pcpu *txq_pcpu,
2509 int num)
2510 {
2511 int req, desc_count;
2512 unsigned int thread;
2513
2514 if (txq_pcpu->reserved_num >= num)
2515 return 0;
2516
2517
2518
2519
2520
2521 desc_count = 0;
2522
2523 for (thread = 0; thread < port->priv->nthreads; thread++) {
2524 struct mvpp2_txq_pcpu *txq_pcpu_aux;
2525
2526 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
2527 desc_count += txq_pcpu_aux->count;
2528 desc_count += txq_pcpu_aux->reserved_num;
2529 }
2530
2531 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
2532 desc_count += req;
2533
2534 if (desc_count >
2535 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
2536 return -ENOMEM;
2537
2538 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
2539
2540
2541 if (txq_pcpu->reserved_num < num)
2542 return -ENOMEM;
2543 return 0;
2544 }
2545
2546
2547
2548
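/* Release the last allocated Tx descriptor, used on error paths before the descriptor reaches HW */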
2549 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2550 {
2551 if (txq->next_desc_to_proc == 0)
2552 txq->next_desc_to_proc = txq->last_desc - 1;
2553 else
2554 txq->next_desc_to_proc--;
2555 }
2556
2557
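/* Build the Tx descriptor command bits for checksum offload */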
2558 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2559 int ip_hdr_len, int l4_proto)
2560 {
2561 u32 command;
2562
2563
2564
2565
2566 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2567 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2568 command |= MVPP2_TXD_IP_CSUM_DISABLE;
2569
2570 if (l3_proto == htons(ETH_P_IP)) {
2571 command &= ~MVPP2_TXD_IP_CSUM_DISABLE;
2572 command &= ~MVPP2_TXD_L3_IP6;
2573 } else {
2574 command |= MVPP2_TXD_L3_IP6;
2575 }
2576
2577 if (l4_proto == IPPROTO_TCP) {
2578 command &= ~MVPP2_TXD_L4_UDP;
2579 command &= ~MVPP2_TXD_L4_CSUM_FRAG;
2580 } else if (l4_proto == IPPROTO_UDP) {
2581 command |= MVPP2_TXD_L4_UDP;
2582 command &= ~MVPP2_TXD_L4_CSUM_FRAG;
2583 } else {
2584 command |= MVPP2_TXD_L4_CSUM_NOT;
2585 }
2586
2587 return command;
2588 }
2589
2590
2591
2592
2593
2594
2595
2596
2597
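/* Get the number of descriptors transmitted by HW for this queue; reading the register also clears the counter */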
2598 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2599 struct mvpp2_tx_queue *txq)
2600 {
2601 u32 val;
2602
2603
2604 val = mvpp2_thread_read_relaxed(port->priv,
2605 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2606 MVPP2_TXQ_SENT_REG(txq->id));
2607
2608 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2609 MVPP2_TRANSMITTED_COUNT_OFFSET;
2610 }
2611
2612
2613
2614
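/* Clear the transmitted-descriptor counters of all Tx queues of this port (per-CPU callback) */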
2615 static void mvpp2_txq_sent_counter_clear(void *arg)
2616 {
2617 struct mvpp2_port *port = arg;
2618 int queue;
2619
2620
2621 if (smp_processor_id() >= port->priv->nthreads)
2622 return;
2623
2624 for (queue = 0; queue < port->ntxqs; queue++) {
2625 int id = port->txqs[queue]->id;
2626
2627 mvpp2_thread_read(port->priv,
2628 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2629 MVPP2_TXQ_SENT_REG(id));
2630 }
2631 }
2632
2633
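/* Set maximum transmit sizes (token bucket) for the egress port and its Tx queues */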
2634 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2635 {
2636 u32 val, size, mtu;
2637 int txq, tx_port_num;
2638
2639 mtu = port->pkt_size * 8;
2640 if (mtu > MVPP2_TXP_MTU_MAX)
2641 mtu = MVPP2_TXP_MTU_MAX;
2642
2643
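/* Program three times the computed MTU into the scheduler (token-bucket update workaround) */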
2644 mtu = 3 * mtu;
2645
2646
2647 tx_port_num = mvpp2_egress_port(port);
2648 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2649
2650
2651 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2652 val &= ~MVPP2_TXP_MTU_MAX;
2653 val |= mtu;
2654 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2655
2656
2657 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2658 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2659 if (size < mtu) {
2660 size = mtu;
2661 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2662 val |= size;
2663 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2664 }
2665
2666 for (txq = 0; txq < port->ntxqs; txq++) {
2667 val = mvpp2_read(port->priv,
2668 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2669 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2670
2671 if (size < mtu) {
2672 size = mtu;
2673 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2674 val |= size;
2675 mvpp2_write(port->priv,
2676 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2677 val);
2678 }
2679 }
2680 }
2681
2682
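/* Set the non-occupied descriptors threshold for an Rx queue */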
2683 static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
2684 struct mvpp2_rx_queue *rxq)
2685 {
2686 u32 val;
2687
2688 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
2689
2690 val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
2691 val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
2692 val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
2693 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
2694 }
2695
2696
2697
2698
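/* Set the number of received packets that will trigger an Rx interrupt */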
2699 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2700 struct mvpp2_rx_queue *rxq)
2701 {
2702 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2703
2704 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2705 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2706
2707 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2708 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2709 rxq->pkts_coal);
2710
2711 put_cpu();
2712 }
2713
2714
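/* Set the number of completed Tx packets that will trigger a Tx-done interrupt, on every thread */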
2715 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2716 struct mvpp2_tx_queue *txq)
2717 {
2718 unsigned int thread;
2719 u32 val;
2720
2721 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2722 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2723
2724 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2725
2726 for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
2727 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2728 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2729 }
2730 }
2731
2732 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2733 {
2734 u64 tmp = (u64)clk_hz * usec;
2735
2736 do_div(tmp, USEC_PER_SEC);
2737
2738 return tmp > U32_MAX ? U32_MAX : tmp;
2739 }
2740
2741 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2742 {
2743 u64 tmp = (u64)cycles * USEC_PER_SEC;
2744
2745 do_div(tmp, clk_hz);
2746
2747 return tmp > U32_MAX ? U32_MAX : tmp;
2748 }
2749
2750
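/* Set the Rx time coalescing, converting usec to HW cycles and clamping to the maximum threshold */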
2751 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2752 struct mvpp2_rx_queue *rxq)
2753 {
2754 unsigned long freq = port->priv->tclk;
2755 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2756
2757 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2758 rxq->time_coal =
2759 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2760
2761
2762 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2763 }
2764
2765 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2766 }
2767
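/* Set the Tx-done time coalescing for the port */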
2768 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2769 {
2770 unsigned long freq = port->priv->tclk;
2771 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2772
2773 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2774 port->tx_time_coal =
2775 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2776
2777
2778 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2779 }
2780
2781 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2782 }
2783
2784
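/* Free the buffers (skbs or XDP frames) attached to completed Tx descriptors */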
2785 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2786 struct mvpp2_tx_queue *txq,
2787 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2788 {
2789 struct xdp_frame_bulk bq;
2790 int i;
2791
2792 xdp_frame_bulk_init(&bq);
2793
2794 rcu_read_lock();
2795
2796 for (i = 0; i < num; i++) {
2797 struct mvpp2_txq_pcpu_buf *tx_buf =
2798 txq_pcpu->buffs + txq_pcpu->txq_get_index;
2799
2800 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2801 tx_buf->type != MVPP2_TYPE_XDP_TX)
2802 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2803 tx_buf->size, DMA_TO_DEVICE);
2804 if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2805 dev_kfree_skb_any(tx_buf->skb);
2806 else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2807 tx_buf->type == MVPP2_TYPE_XDP_NDO)
2808 xdp_return_frame_bulk(tx_buf->xdpf, &bq);
2809
2810 mvpp2_txq_inc_get(txq_pcpu);
2811 }
2812 xdp_flush_frame_bulk(&bq);
2813
2814 rcu_read_unlock();
2815 }
2816
2817 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2818 u32 cause)
2819 {
2820 int queue = fls(cause) - 1;
2821
2822 return port->rxqs[queue];
2823 }
2824
2825 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2826 u32 cause)
2827 {
2828 int queue = fls(cause) - 1;
2829
2830 return port->txqs[queue];
2831 }
2832
2833
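/* Handle end of transmission: release sent buffers and wake the netdev queue when enough room is available */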
2834 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2835 struct mvpp2_txq_pcpu *txq_pcpu)
2836 {
2837 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2838 int tx_done;
2839
2840 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2841 netdev_err(port->dev, "wrong cpu at the end of Tx processing\n");
2842
2843 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2844 if (!tx_done)
2845 return;
2846 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2847
2848 txq_pcpu->count -= tx_done;
2849
2850 if (netif_tx_queue_stopped(nq))
2851 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2852 netif_tx_wake_queue(nq);
2853 }
2854
2855 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2856 unsigned int thread)
2857 {
2858 struct mvpp2_tx_queue *txq;
2859 struct mvpp2_txq_pcpu *txq_pcpu;
2860 unsigned int tx_todo = 0;
2861
2862 while (cause) {
2863 txq = mvpp2_get_tx_queue(port, cause);
2864 if (!txq)
2865 break;
2866
2867 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2868
2869 if (txq_pcpu->count) {
2870 mvpp2_txq_done(port, txq, txq_pcpu);
2871 tx_todo += txq_pcpu->count;
2872 }
2873
2874 cause &= ~(1 << txq->log_id);
2875 }
2876 return tx_todo;
2877 }
2878
2879
2880
2881
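/* Allocate and initialize the per-thread aggregated Tx queue descriptors */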
2882 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2883 struct mvpp2_tx_queue *aggr_txq,
2884 unsigned int thread, struct mvpp2 *priv)
2885 {
2886 u32 txq_dma;
2887
2888
2889 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2890 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2891 &aggr_txq->descs_dma, GFP_KERNEL);
2892 if (!aggr_txq->descs)
2893 return -ENOMEM;
2894
2895 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2896
2897
2898 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2899 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2900
2901
2902
2903
2904 if (priv->hw_version == MVPP21)
2905 txq_dma = aggr_txq->descs_dma;
2906 else
2907 txq_dma = aggr_txq->descs_dma >>
2908 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2909
2910 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2911 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2912 MVPP2_AGGR_TXQ_SIZE);
2913
2914 return 0;
2915 }
2916
2917
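/* Create and initialize an Rx queue: descriptor ring, coalescing, packet offset and XDP rxq registration */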
2918 static int mvpp2_rxq_init(struct mvpp2_port *port,
2919 struct mvpp2_rx_queue *rxq)
2920 {
2921 struct mvpp2 *priv = port->priv;
2922 unsigned int thread;
2923 u32 rxq_dma;
2924 int err;
2925
2926 rxq->size = port->rx_ring_size;
2927
2928
2929 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2930 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2931 &rxq->descs_dma, GFP_KERNEL);
2932 if (!rxq->descs)
2933 return -ENOMEM;
2934
2935 rxq->last_desc = rxq->size - 1;
2936
2937
2938 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2939
2940
2941 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2942 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2943 if (port->priv->hw_version == MVPP21)
2944 rxq_dma = rxq->descs_dma;
2945 else
2946 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2947 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2948 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2949 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2950 put_cpu();
2951
2952
2953 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2954
2955
2956 mvpp2_rx_pkts_coal_set(port, rxq);
2957 mvpp2_rx_time_coal_set(port, rxq);
2958
2959
2960 mvpp2_set_rxq_free_tresh(port, rxq);
2961
2962
2963 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2964
2965 if (priv->percpu_pools) {
2966 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
2967 if (err < 0)
2968 goto err_free_dma;
2969
2970 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
2971 if (err < 0)
2972 goto err_unregister_rxq_short;
2973
2974
2975 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
2976 MEM_TYPE_PAGE_POOL,
2977 priv->page_pool[rxq->logic_rxq]);
2978 if (err < 0)
2979 goto err_unregister_rxq_long;
2980
2981 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
2982 MEM_TYPE_PAGE_POOL,
2983 priv->page_pool[rxq->logic_rxq +
2984 port->nrxqs]);
2985 if (err < 0)
2986 goto err_unregister_mem_rxq_short;
2987 }
2988
2989 return 0;
2990
2991 err_unregister_mem_rxq_short:
2992 xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
2993 err_unregister_rxq_long:
2994 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2995 err_unregister_rxq_short:
2996 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2997 err_free_dma:
2998 dma_free_coherent(port->dev->dev.parent,
2999 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3000 rxq->descs, rxq->descs_dma);
3001 return err;
3002 }
3003
3004
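/* Return all buffers still held by the Rx queue to their BM pools */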
3005 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3006 struct mvpp2_rx_queue *rxq)
3007 {
3008 int rx_received, i;
3009
3010 rx_received = mvpp2_rxq_received(port, rxq->id);
3011 if (!rx_received)
3012 return;
3013
3014 for (i = 0; i < rx_received; i++) {
3015 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3016 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3017 int pool;
3018
3019 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3020 MVPP2_RXD_BM_POOL_ID_OFFS;
3021
3022 mvpp2_bm_pool_put(port, pool,
3023 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3024 mvpp2_rxdesc_cookie_get(port, rx_desc));
3025 }
3026 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3027 }
3028
3029
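/* Clean up an Rx queue: unregister XDP info, drop pending packets and free the descriptor ring */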
3030 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3031 struct mvpp2_rx_queue *rxq)
3032 {
3033 unsigned int thread;
3034
3035 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
3036 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3037
3038 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
3039 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3040
3041 mvpp2_rxq_drop_pkts(port, rxq);
3042
3043 if (rxq->descs)
3044 dma_free_coherent(port->dev->dev.parent,
3045 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3046 rxq->descs,
3047 rxq->descs_dma);
3048
3049 rxq->descs = NULL;
3050 rxq->last_desc = 0;
3051 rxq->next_desc_to_proc = 0;
3052 rxq->descs_dma = 0;
3053
3054
3055
3056
3057 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3058 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3059 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
3060 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
3061 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
3062 put_cpu();
3063 }
3064
3065
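/* Create and initialize a Tx queue, including per-thread buffer tracking and TSO header memory */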
3066 static int mvpp2_txq_init(struct mvpp2_port *port,
3067 struct mvpp2_tx_queue *txq)
3068 {
3069 u32 val;
3070 unsigned int thread;
3071 int desc, desc_per_txq, tx_port_num;
3072 struct mvpp2_txq_pcpu *txq_pcpu;
3073
3074 txq->size = port->tx_ring_size;
3075
3076
3077 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
3078 txq->size * MVPP2_DESC_ALIGNED_SIZE,
3079 &txq->descs_dma, GFP_KERNEL);
3080 if (!txq->descs)
3081 return -ENOMEM;
3082
3083 txq->last_desc = txq->size - 1;
3084
3085
3086 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3087 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3088 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
3089 txq->descs_dma);
3090 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
3091 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
3092 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
3093 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
3094 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3095 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
3096 val &= ~MVPP2_TXQ_PENDING_MASK;
3097 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
3098
3099
3100
3101
3102
3103
3104 desc_per_txq = 16;
3105 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3106 (txq->log_id * desc_per_txq);
3107
3108 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
3109 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3110 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
3111 put_cpu();
3112
3113
3114 tx_port_num = mvpp2_egress_port(port);
3115 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3116
3117 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3118 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3119 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3120 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3121 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3122
3123 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3124 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3125 val);
3126
3127 for (thread = 0; thread < port->priv->nthreads; thread++) {
3128 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3129 txq_pcpu->size = txq->size;
3130 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
3131 sizeof(*txq_pcpu->buffs),
3132 GFP_KERNEL);
3133 if (!txq_pcpu->buffs)
3134 return -ENOMEM;
3135
3136 txq_pcpu->count = 0;
3137 txq_pcpu->reserved_num = 0;
3138 txq_pcpu->txq_put_index = 0;
3139 txq_pcpu->txq_get_index = 0;
3140 txq_pcpu->tso_headers = NULL;
3141
3142 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
3143 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
3144
3145 txq_pcpu->tso_headers =
3146 dma_alloc_coherent(port->dev->dev.parent,
3147 txq_pcpu->size * TSO_HEADER_SIZE,
3148 &txq_pcpu->tso_headers_dma,
3149 GFP_KERNEL);
3150 if (!txq_pcpu->tso_headers)
3151 return -ENOMEM;
3152 }
3153
3154 return 0;
3155 }
3156
3157
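/* Free all resources associated with a Tx queue */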
3158 static void mvpp2_txq_deinit(struct mvpp2_port *port,
3159 struct mvpp2_tx_queue *txq)
3160 {
3161 struct mvpp2_txq_pcpu *txq_pcpu;
3162 unsigned int thread;
3163
3164 for (thread = 0; thread < port->priv->nthreads; thread++) {
3165 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3166 kfree(txq_pcpu->buffs);
3167
3168 if (txq_pcpu->tso_headers)
3169 dma_free_coherent(port->dev->dev.parent,
3170 txq_pcpu->size * TSO_HEADER_SIZE,
3171 txq_pcpu->tso_headers,
3172 txq_pcpu->tso_headers_dma);
3173
3174 txq_pcpu->tso_headers = NULL;
3175 }
3176
3177 if (txq->descs)
3178 dma_free_coherent(port->dev->dev.parent,
3179 txq->size * MVPP2_DESC_ALIGNED_SIZE,
3180 txq->descs, txq->descs_dma);
3181
3182 txq->descs = NULL;
3183 txq->last_desc = 0;
3184 txq->next_desc_to_proc = 0;
3185 txq->descs_dma = 0;
3186
3187
3188 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
3189
3190
3191 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3192 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3193 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
3194 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
3195 put_cpu();
3196 }
3197
3198
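/* Drain a Tx queue: enable drain mode, wait until no descriptors are pending, then free the remaining buffers */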
3199 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3200 {
3201 struct mvpp2_txq_pcpu *txq_pcpu;
3202 int delay, pending;
3203 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3204 u32 val;
3205
3206 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3207 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
3208 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3209 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3210
3211
3212
3213
3214 delay = 0;
3215 do {
3216 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3217 netdev_warn(port->dev,
3218 "port %d: cleaning queue %d timed out\n",
3219 port->id, txq->log_id);
3220 break;
3221 }
3222 mdelay(1);
3223 delay++;
3224
3225 pending = mvpp2_thread_read(port->priv, thread,
3226 MVPP2_TXQ_PENDING_REG);
3227 pending &= MVPP2_TXQ_PENDING_MASK;
3228 } while (pending);
3229
3230 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3231 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3232 put_cpu();
3233
3234 for (thread = 0; thread < port->priv->nthreads; thread++) {
3235 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3236
3237
3238 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3239
3240
3241 txq_pcpu->count = 0;
3242 txq_pcpu->txq_put_index = 0;
3243 txq_pcpu->txq_get_index = 0;
3244 }
3245 }
3246
3247
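/* Clean up all Tx queues of the port, flushing pending traffic first */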
3248 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3249 {
3250 struct mvpp2_tx_queue *txq;
3251 int queue;
3252 u32 val;
3253
3254 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3255
3256
3257 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3258 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3259
3260 for (queue = 0; queue < port->ntxqs; queue++) {
3261 txq = port->txqs[queue];
3262 mvpp2_txq_clean(port, txq);
3263 mvpp2_txq_deinit(port, txq);
3264 }
3265
3266 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3267
3268 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3269 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3270 }
3271
3272
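/* Clean up all Rx queues of the port */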
3273 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3274 {
3275 int queue;
3276
3277 for (queue = 0; queue < port->nrxqs; queue++)
3278 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3279
3280 if (port->tx_fc)
3281 mvpp2_rxq_disable_fc(port);
3282 }
3283
3284
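/* Initialize all Rx queues of the port */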
3285 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3286 {
3287 int queue, err;
3288
3289 for (queue = 0; queue < port->nrxqs; queue++) {
3290 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3291 if (err)
3292 goto err_cleanup;
3293 }
3294
3295 if (port->tx_fc)
3296 mvpp2_rxq_enable_fc(port);
3297
3298 return 0;
3299
3300 err_cleanup:
3301 mvpp2_cleanup_rxqs(port);
3302 return err;
3303 }
3304
3305
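/* Initialize all Tx queues of the port and configure Tx coalescing */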
3306 static int mvpp2_setup_txqs(struct mvpp2_port *port)
3307 {
3308 struct mvpp2_tx_queue *txq;
3309 int queue, err;
3310
3311 for (queue = 0; queue < port->ntxqs; queue++) {
3312 txq = port->txqs[queue];
3313 err = mvpp2_txq_init(port, txq);
3314 if (err)
3315 goto err_cleanup;
3316
3317
3318 if (queue < num_possible_cpus())
3319 netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
3320 }
3321
3322 if (port->has_tx_irqs) {
3323 mvpp2_tx_time_coal_set(port);
3324 for (queue = 0; queue < port->ntxqs; queue++) {
3325 txq = port->txqs[queue];
3326 mvpp2_tx_pkts_coal_set(port, txq);
3327 }
3328 }
3329
3330 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3331 return 0;
3332
3333 err_cleanup:
3334 mvpp2_cleanup_txqs(port);
3335 return err;
3336 }
3337
3338
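/* Rx/Tx queue-vector interrupt handler: mask the vector and schedule NAPI */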
3339 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
3340 {
3341 struct mvpp2_queue_vector *qv = dev_id;
3342
3343 mvpp2_qvec_interrupt_disable(qv);
3344
3345 napi_schedule(&qv->napi);
3346
3347 return IRQ_HANDLED;
3348 }
3349
3350 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
3351 {
3352 struct skb_shared_hwtstamps shhwtstamps;
3353 struct mvpp2_hwtstamp_queue *queue;
3354 struct sk_buff *skb;
3355 void __iomem *ptp_q;
3356 unsigned int id;
3357 u32 r0, r1, r2;
3358
3359 ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3360 if (nq)
3361 ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
3362
3363 queue = &port->tx_hwtstamp_queue[nq];
3364
3365 while (1) {
3366 r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3367 if (!r0)
3368 break;
3369
3370 r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3371 r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3372
3373 id = (r0 >> 1) & 31;
3374
3375 skb = queue->skb[id];
3376 queue->skb[id] = NULL;
3377 if (skb) {
3378 u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3379
3380 mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3381 skb_tstamp_tx(skb, &shhwtstamps);
3382 dev_kfree_skb_any(skb);
3383 }
3384 }
3385 }
3386
3387 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3388 {
3389 void __iomem *ptp;
3390 u32 val;
3391
3392 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3393 val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3394 if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3395 mvpp2_isr_handle_ptp_queue(port, 0);
3396 if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3397 mvpp2_isr_handle_ptp_queue(port, 1);
3398 }
3399
3400 static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
3401 {
3402 struct net_device *dev = port->dev;
3403
3404 if (port->phylink) {
3405 phylink_mac_change(port->phylink, link);
3406 return;
3407 }
3408
3409 if (!netif_running(dev))
3410 return;
3411
3412 if (link) {
3413 mvpp2_interrupts_enable(port);
3414
3415 mvpp2_egress_enable(port);
3416 mvpp2_ingress_enable(port);
3417 netif_carrier_on(dev);
3418 netif_tx_wake_all_queues(dev);
3419 } else {
3420 netif_tx_stop_all_queues(dev);
3421 netif_carrier_off(dev);
3422 mvpp2_ingress_disable(port);
3423 mvpp2_egress_disable(port);
3424
3425 mvpp2_interrupts_disable(port);
3426 }
3427 }
3428
3429 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3430 {
3431 bool link;
3432 u32 val;
3433
3434 val = readl(port->base + MVPP22_XLG_INT_STAT);
3435 if (val & MVPP22_XLG_INT_STAT_LINK) {
3436 val = readl(port->base + MVPP22_XLG_STATUS);
3437 link = (val & MVPP22_XLG_STATUS_LINK_UP);
3438 mvpp2_isr_handle_link(port, link);
3439 }
3440 }
3441
3442 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3443 {
3444 bool link;
3445 u32 val;
3446
3447 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3448 phy_interface_mode_is_8023z(port->phy_interface) ||
3449 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3450 val = readl(port->base + MVPP22_GMAC_INT_STAT);
3451 if (val & MVPP22_GMAC_INT_STAT_LINK) {
3452 val = readl(port->base + MVPP2_GMAC_STATUS0);
3453 link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3454 mvpp2_isr_handle_link(port, link);
3455 }
3456 }
3457 }
3458
3459
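/* Per-port interrupt handler for link, XLG and PTP events */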
3460 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3461 {
3462 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3463 u32 val;
3464
3465 mvpp22_gop_mask_irq(port);
3466
3467 if (mvpp2_port_supports_xlg(port) &&
3468 mvpp2_is_xlg(port->phy_interface)) {
3469
3470 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3471 if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3472 mvpp2_isr_handle_xlg(port);
3473 if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
3474 mvpp2_isr_handle_ptp(port);
3475 } else {
3476
3477
3478
3479 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3480 if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3481 mvpp2_isr_handle_gmac_internal(port);
3482 if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
3483 mvpp2_isr_handle_ptp(port);
3484 }
3485
3486 mvpp22_gop_unmask_irq(port);
3487 return IRQ_HANDLED;
3488 }
3489
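/* hrtimer callback used to process Tx-done work when Tx-done interrupts are not available */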
3490 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
3491 {
3492 struct net_device *dev;
3493 struct mvpp2_port *port;
3494 struct mvpp2_port_pcpu *port_pcpu;
3495 unsigned int tx_todo, cause;
3496
3497 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3498 dev = port_pcpu->dev;
3499
3500 if (!netif_running(dev))
3501 return HRTIMER_NORESTART;
3502
3503 port_pcpu->timer_scheduled = false;
3504 port = netdev_priv(dev);
3505
3506
3507 cause = (1 << port->ntxqs) - 1;
3508 tx_todo = mvpp2_tx_done(port, cause,
3509 mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
3510
3511
3512 if (tx_todo && !port_pcpu->timer_scheduled) {
3513 port_pcpu->timer_scheduled = true;
3514 hrtimer_forward_now(&port_pcpu->tx_done_timer,
3515 MVPP2_TXDONE_HRTIMER_PERIOD_NS);
3516
3517 return HRTIMER_RESTART;
3518 }
3519 return HRTIMER_NORESTART;
3520 }
3521
3522
3523
3524
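/* Report details about an Rx descriptor received with an error */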
3525 static void mvpp2_rx_error(struct mvpp2_port *port,
3526 struct mvpp2_rx_desc *rx_desc)
3527 {
3528 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3529 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3530 char *err_str = NULL;
3531
3532 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3533 case MVPP2_RXD_ERR_CRC:
3534 err_str = "crc";
3535 break;
3536 case MVPP2_RXD_ERR_OVERRUN:
3537 err_str = "overrun";
3538 break;
3539 case MVPP2_RXD_ERR_RESOURCE:
3540 err_str = "resource";
3541 break;
3542 }
3543 if (err_str && net_ratelimit())
3544 netdev_err(port->dev,
3545 "bad rx status %08x (%s error), size=%zu\n",
3546 status, err_str, sz);
3547 }
3548
3549
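/* Translate the Rx descriptor status into an skb checksum state */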
3550 static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
3551 {
3552 if (((status & MVPP2_RXD_L3_IP4) &&
3553 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3554 (status & MVPP2_RXD_L3_IP6))
3555 if (((status & MVPP2_RXD_L4_UDP) ||
3556 (status & MVPP2_RXD_L4_TCP)) &&
3557 (status & MVPP2_RXD_L4_CSUM_OK))
3558 return CHECKSUM_UNNECESSARY;
3559
3560 return CHECKSUM_NONE;
3561 }
3562
3563
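/* Allocate a new buffer and give it back to the BM pool */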
3564 static int mvpp2_rx_refill(struct mvpp2_port *port,
3565 struct mvpp2_bm_pool *bm_pool,
3566 struct page_pool *page_pool, int pool)
3567 {
3568 dma_addr_t dma_addr;
3569 phys_addr_t phys_addr;
3570 void *buf;
3571
3572 buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3573 &dma_addr, &phys_addr, GFP_ATOMIC);
3574 if (!buf)
3575 return -ENOMEM;
3576
3577 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3578
3579 return 0;
3580 }
3581
3582
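/* Build the Tx checksum offload command for an skb */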
3583 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3584 {
3585 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3586 int ip_hdr_len = 0;
3587 u8 l4_proto;
3588 __be16 l3_proto = vlan_get_protocol(skb);
3589
3590 if (l3_proto == htons(ETH_P_IP)) {
3591 struct iphdr *ip4h = ip_hdr(skb);
3592
3593
3594 ip_hdr_len = ip4h->ihl;
3595 l4_proto = ip4h->protocol;
3596 } else if (l3_proto == htons(ETH_P_IPV6)) {
3597 struct ipv6hdr *ip6h = ipv6_hdr(skb);
3598
3599
3600 if (skb_network_header_len(skb) > 0)
3601 ip_hdr_len = (skb_network_header_len(skb) >> 2);
3602 l4_proto = ip6h->nexthdr;
3603 } else {
3604 return MVPP2_TXD_L4_CSUM_NOT;
3605 }
3606
3607 return mvpp2_txq_desc_csum(skb_network_offset(skb),
3608 l3_proto, ip_hdr_len, l4_proto);
3609 }
3610
3611 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
3612 }
3613
3614 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3615 {
3616 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3617 struct mvpp2_tx_queue *aggr_txq;
3618 struct mvpp2_txq_pcpu *txq_pcpu;
3619 struct mvpp2_tx_queue *txq;
3620 struct netdev_queue *nq;
3621
3622 txq = port->txqs[txq_id];
3623 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3624 nq = netdev_get_tx_queue(port->dev, txq_id);
3625 aggr_txq = &port->priv->aggr_txqs[thread];
3626
3627 txq_pcpu->reserved_num -= nxmit;
3628 txq_pcpu->count += nxmit;
3629 aggr_txq->count += nxmit;
3630
3631
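/* Make sure descriptor updates are visible to the HW before triggering transmission */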
3632 wmb();
3633 mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3634
3635 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3636 netif_tx_stop_queue(nq);
3637
3638
3639 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3640 mvpp2_txq_done(port, txq, txq_pcpu);
3641 }
3642
3643 static int
3644 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3645 struct xdp_frame *xdpf, bool dma_map)
3646 {
3647 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3648 u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3649 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3650 enum mvpp2_tx_buf_type buf_type;
3651 struct mvpp2_txq_pcpu *txq_pcpu;
3652 struct mvpp2_tx_queue *aggr_txq;
3653 struct mvpp2_tx_desc *tx_desc;
3654 struct mvpp2_tx_queue *txq;
3655 int ret = MVPP2_XDP_TX;
3656 dma_addr_t dma_addr;
3657
3658 txq = port->txqs[txq_id];
3659 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3660 aggr_txq = &port->priv->aggr_txqs[thread];
3661
3662
3663 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3664 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3665 ret = MVPP2_XDP_DROPPED;
3666 goto out;
3667 }
3668
3669
3670 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3671 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3672 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3673
3674 if (dma_map) {
3675
3676 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3677 xdpf->len, DMA_TO_DEVICE);
3678
3679 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3680 mvpp2_txq_desc_put(txq);
3681 ret = MVPP2_XDP_DROPPED;
3682 goto out;
3683 }
3684
3685 buf_type = MVPP2_TYPE_XDP_NDO;
3686 } else {
3687
3688 struct page *page = virt_to_page(xdpf->data);
3689
3690 dma_addr = page_pool_get_dma_addr(page) +
3691 sizeof(*xdpf) + xdpf->headroom;
3692 dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3693 xdpf->len, DMA_BIDIRECTIONAL);
3694
3695 buf_type = MVPP2_TYPE_XDP_TX;
3696 }
3697
3698 mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3699
3700 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3701 mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3702
3703 out:
3704 return ret;
3705 }
3706
3707 static int
3708 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3709 {
3710 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3711 struct xdp_frame *xdpf;
3712 u16 txq_id;
3713 int ret;
3714
3715 xdpf = xdp_convert_buff_to_frame(xdp);
3716 if (unlikely(!xdpf))
3717 return MVPP2_XDP_DROPPED;
3718
3719
3720
3721
3722 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3723
3724 ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3725 if (ret == MVPP2_XDP_TX) {
3726 u64_stats_update_begin(&stats->syncp);
3727 stats->tx_bytes += xdpf->len;
3728 stats->tx_packets++;
3729 stats->xdp_tx++;
3730 u64_stats_update_end(&stats->syncp);
3731
3732 mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3733 } else {
3734 u64_stats_update_begin(&stats->syncp);
3735 stats->xdp_tx_err++;
3736 u64_stats_update_end(&stats->syncp);
3737 }
3738
3739 return ret;
3740 }
3741
3742 static int
3743 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3744 struct xdp_frame **frames, u32 flags)
3745 {
3746 struct mvpp2_port *port = netdev_priv(dev);
3747 int i, nxmit_byte = 0, nxmit = 0;
3748 struct mvpp2_pcpu_stats *stats;
3749 u16 txq_id;
3750 u32 ret;
3751
3752 if (unlikely(test_bit(0, &port->state)))
3753 return -ENETDOWN;
3754
3755 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3756 return -EINVAL;
3757
3758
3759
3760
3761 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3762
3763 for (i = 0; i < num_frame; i++) {
3764 ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3765 if (ret != MVPP2_XDP_TX)
3766 break;
3767
3768 nxmit_byte += frames[i]->len;
3769 nxmit++;
3770 }
3771
3772 if (likely(nxmit > 0))
3773 mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3774
3775 stats = this_cpu_ptr(port->stats);
3776 u64_stats_update_begin(&stats->syncp);
3777 stats->tx_bytes += nxmit_byte;
3778 stats->tx_packets += nxmit;
3779 stats->xdp_xmit += nxmit;
3780 stats->xdp_xmit_err += num_frame - nxmit;
3781 u64_stats_update_end(&stats->syncp);
3782
3783 return nxmit;
3784 }
3785
3786 static int
3787 mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
3788 struct xdp_buff *xdp, struct page_pool *pp,
3789 struct mvpp2_pcpu_stats *stats)
3790 {
3791 unsigned int len, sync, err;
3792 struct page *page;
3793 u32 ret, act;
3794
3795 len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3796 act = bpf_prog_run_xdp(prog, xdp);
3797
3798
3799 sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3800 sync = max(sync, len);
3801
3802 switch (act) {
3803 case XDP_PASS:
3804 stats->xdp_pass++;
3805 ret = MVPP2_XDP_PASS;
3806 break;
3807 case XDP_REDIRECT:
3808 err = xdp_do_redirect(port->dev, xdp, prog);
3809 if (unlikely(err)) {
3810 ret = MVPP2_XDP_DROPPED;
3811 page = virt_to_head_page(xdp->data);
3812 page_pool_put_page(pp, page, sync, true);
3813 } else {
3814 ret = MVPP2_XDP_REDIR;
3815 stats->xdp_redirect++;
3816 }
3817 break;
3818 case XDP_TX:
3819 ret = mvpp2_xdp_xmit_back(port, xdp);
3820 if (ret != MVPP2_XDP_TX) {
3821 page = virt_to_head_page(xdp->data);
3822 page_pool_put_page(pp, page, sync, true);
3823 }
3824 break;
3825 default:
3826 bpf_warn_invalid_xdp_action(port->dev, prog, act);
3827 fallthrough;
3828 case XDP_ABORTED:
3829 trace_xdp_exception(port->dev, prog, act);
3830 fallthrough;
3831 case XDP_DROP:
3832 page = virt_to_head_page(xdp->data);
3833 page_pool_put_page(pp, page, sync, true);
3834 ret = MVPP2_XDP_DROPPED;
3835 stats->xdp_drop++;
3836 break;
3837 }
3838
3839 return ret;
3840 }
3841
3842 static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
3843 int pool, u32 rx_status)
3844 {
3845 phys_addr_t phys_addr, phys_addr_next;
3846 dma_addr_t dma_addr, dma_addr_next;
3847 struct mvpp2_buff_hdr *buff_hdr;
3848
3849 phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3850 dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3851
3852 do {
3853 buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
3854
3855 phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
3856 dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
3857
3858 if (port->priv->hw_version >= MVPP22) {
3859 phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
3860 dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
3861 }
3862
3863 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3864
3865 phys_addr = phys_addr_next;
3866 dma_addr = dma_addr_next;
3867
3868 } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
3869 }
3870
3871
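/* Main Rx processing routine, called from the NAPI poll handler */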
3872 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3873 int rx_todo, struct mvpp2_rx_queue *rxq)
3874 {
3875 struct net_device *dev = port->dev;
3876 struct mvpp2_pcpu_stats ps = {};
3877 enum dma_data_direction dma_dir;
3878 struct bpf_prog *xdp_prog;
3879 struct xdp_buff xdp;
3880 int rx_received;
3881 int rx_done = 0;
3882 u32 xdp_ret = 0;
3883
3884 xdp_prog = READ_ONCE(port->xdp_prog);
3885
3886
3887 rx_received = mvpp2_rxq_received(port, rxq->id);
3888 if (rx_todo > rx_received)
3889 rx_todo = rx_received;
3890
3891 while (rx_done < rx_todo) {
3892 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3893 struct mvpp2_bm_pool *bm_pool;
3894 struct page_pool *pp = NULL;
3895 struct sk_buff *skb;
3896 unsigned int frag_size;
3897 dma_addr_t dma_addr;
3898 phys_addr_t phys_addr;
3899 u32 rx_status, timestamp;
3900 int pool, rx_bytes, err, ret;
3901 struct page *page;
3902 void *data;
3903
3904 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3905 data = (void *)phys_to_virt(phys_addr);
3906 page = virt_to_page(data);
3907 prefetch(page);
3908
3909 rx_done++;
3910 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3911 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3912 rx_bytes -= MVPP2_MH_SIZE;
3913 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3914
3915 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3916 MVPP2_RXD_BM_POOL_ID_OFFS;
3917 bm_pool = &port->priv->bm_pools[pool];
3918
3919 if (port->priv->percpu_pools) {
3920 pp = port->priv->page_pool[pool];
3921 dma_dir = page_pool_get_dma_dir(pp);
3922 } else {
3923 dma_dir = DMA_FROM_DEVICE;
3924 }
3925
3926 dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3927 rx_bytes + MVPP2_MH_SIZE,
3928 dma_dir);
3929
3930
3931 if (rx_status & MVPP2_RXD_BUF_HDR)
3932 goto err_drop_frame;
3933
3934
3935
3936
3937
3938
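/* On error, release the buffer back to the Buffer Manager pool */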
3939 if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3940 goto err_drop_frame;
3941
3942
3943 prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3944
3945 if (bm_pool->frag_size > PAGE_SIZE)
3946 frag_size = 0;
3947 else
3948 frag_size = bm_pool->frag_size;
3949
3950 if (xdp_prog) {
3951 struct xdp_rxq_info *xdp_rxq;
3952
3953 if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3954 xdp_rxq = &rxq->xdp_rxq_short;
3955 else
3956 xdp_rxq = &rxq->xdp_rxq_long;
3957
3958 xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
3959 xdp_prepare_buff(&xdp, data,
3960 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
3961 rx_bytes, false);
3962
3963 ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
3964
3965 if (ret) {
3966 xdp_ret |= ret;
3967 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3968 if (err) {
3969 netdev_err(port->dev, "failed to refill BM pools\n");
3970 goto err_drop_frame;
3971 }
3972
3973 ps.rx_packets++;
3974 ps.rx_bytes += rx_bytes;
3975 continue;
3976 }
3977 }
3978
3979 skb = build_skb(data, frag_size);
3980 if (!skb) {
3981 netdev_warn(port->dev, "skb build failed\n");
3982 goto err_drop_frame;
3983 }
3984
3985
3986
3987
3988 if (mvpp22_rx_hwtstamping(port)) {
3989 timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
3990 mvpp22_tai_tstamp(port->priv->tai, timestamp,
3991 skb_hwtstamps(skb));
3992 }
3993
3994 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3995 if (err) {
3996 netdev_err(port->dev, "failed to refill BM pools\n");
3997 dev_kfree_skb_any(skb);
3998 goto err_drop_frame;
3999 }
4000
4001 if (pp)
4002 skb_mark_for_recycle(skb);
4003 else
4004 dma_unmap_single_attrs(dev->dev.parent, dma_addr,
4005 bm_pool->buf_size, DMA_FROM_DEVICE,
4006 DMA_ATTR_SKIP_CPU_SYNC);
4007
4008 ps.rx_packets++;
4009 ps.rx_bytes += rx_bytes;
4010
4011 skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
4012 skb_put(skb, rx_bytes);
4013 skb->ip_summed = mvpp2_rx_csum(port, rx_status);
4014 skb->protocol = eth_type_trans(skb, dev);
4015
4016 napi_gro_receive(napi, skb);
4017 continue;
4018
4019 err_drop_frame:
4020 dev->stats.rx_errors++;
4021 mvpp2_rx_error(port, rx_desc);
4022
4023 if (rx_status & MVPP2_RXD_BUF_HDR)
4024 mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
4025 else
4026 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
4027 }
4028
4029 if (xdp_ret & MVPP2_XDP_REDIR)
4030 xdp_do_flush_map();
4031
4032 if (ps.rx_packets) {
4033 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
4034
4035 u64_stats_update_begin(&stats->syncp);
4036 stats->rx_packets += ps.rx_packets;
4037 stats->rx_bytes += ps.rx_bytes;
4038
4039 stats->xdp_redirect += ps.xdp_redirect;
4040 stats->xdp_pass += ps.xdp_pass;
4041 stats->xdp_drop += ps.xdp_drop;
4042 u64_stats_update_end(&stats->syncp);
4043 }
4044
4045
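/* Update the Rx queue status once all buffer releases are visible */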
4046 wmb();
4047 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
4048
4049 return rx_todo;
4050 }
4051
4052 static inline void
4053 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4054 struct mvpp2_tx_desc *desc)
4055 {
4056 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4057 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4058
4059 dma_addr_t buf_dma_addr =
4060 mvpp2_txdesc_dma_addr_get(port, desc);
4061 size_t buf_sz =
4062 mvpp2_txdesc_size_get(port, desc);
4063 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
4064 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
4065 buf_sz, DMA_TO_DEVICE);
4066 mvpp2_txq_desc_put(txq);
4067 }
4068
4069 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
4070 struct mvpp2_tx_desc *desc)
4071 {
4072
4073 if (port->priv->hw_version >= MVPP22)
4074 desc->pp22.ptp_descriptor &=
4075 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4076 }
4077
4078 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
4079 struct mvpp2_tx_desc *tx_desc,
4080 struct sk_buff *skb)
4081 {
4082 struct mvpp2_hwtstamp_queue *queue;
4083 unsigned int mtype, type, i;
4084 struct ptp_header *hdr;
4085 u64 ptpdesc;
4086
4087 if (port->priv->hw_version == MVPP21 ||
4088 port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
4089 return false;
4090
4091 type = ptp_classify_raw(skb);
4092 if (!type)
4093 return false;
4094
4095 hdr = ptp_parse_header(skb, type);
4096 if (!hdr)
4097 return false;
4098
4099 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4100
4101 ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
4102 MVPP22_PTP_ACTION_CAPTURE;
4103 queue = &port->tx_hwtstamp_queue[0];
4104
4105 switch (type & PTP_CLASS_VMASK) {
4106 case PTP_CLASS_V1:
4107 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
4108 break;
4109
4110 case PTP_CLASS_V2:
4111 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
4112 mtype = hdr->tsmt & 15;
4113
4114 if (mtype == 0) {
4115 ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
4116 queue = &port->tx_hwtstamp_queue[1];
4117 }
4118 break;
4119 }
4120
4121
4122 i = queue->next;
4123 queue->next = (i + 1) & 31;
4124 if (queue->skb[i])
4125 dev_kfree_skb_any(queue->skb[i]);
4126 queue->skb[i] = skb_get(skb);
4127
4128 ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147 tx_desc->pp22.ptp_descriptor &=
4148 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4149 tx_desc->pp22.ptp_descriptor |=
4150 cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
4151 tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
4152 tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
4153
4154 return true;
4155 }
4156
4157
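/* Map and queue Tx descriptors for each fragment of an skb */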
4158 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
4159 struct mvpp2_tx_queue *aggr_txq,
4160 struct mvpp2_tx_queue *txq)
4161 {
4162 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4163 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4164 struct mvpp2_tx_desc *tx_desc;
4165 int i;
4166 dma_addr_t buf_dma_addr;
4167
4168 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4169 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4170 void *addr = skb_frag_address(frag);
4171
4172 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4173 mvpp2_txdesc_clear_ptp(port, tx_desc);
4174 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4175 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
4176
4177 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
4178 skb_frag_size(frag),
4179 DMA_TO_DEVICE);
4180 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
4181 mvpp2_txq_desc_put(txq);
4182 goto cleanup;
4183 }
4184
4185 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4186
4187 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
4188 /* Last descriptor */
4189 mvpp2_txdesc_cmd_set(port, tx_desc,
4190 MVPP2_TXD_L_DESC);
4191 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4192 } else {
4193 /* Descriptor in the middle: not first, not last */
4194 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
4195 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4196 }
4197 }
4198
4199 return 0;
4200 cleanup:
4201 /* Release all descriptors that were used to map fragments of
4202  * this packet, as well as the corresponding DMA mappings
4203  */
4204 for (i = i - 1; i >= 0; i--) {
4205 tx_desc = txq->descs + i;
4206 tx_desc_unmap_put(port, txq, tx_desc);
4207 }
4208
4209 return -ENOMEM;
4210 }
4211
4212 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
4213 struct net_device *dev,
4214 struct mvpp2_tx_queue *txq,
4215 struct mvpp2_tx_queue *aggr_txq,
4216 struct mvpp2_txq_pcpu *txq_pcpu,
4217 int hdr_sz)
4218 {
4219 struct mvpp2_port *port = netdev_priv(dev);
4220 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4221 dma_addr_t addr;
4222
4223 mvpp2_txdesc_clear_ptp(port, tx_desc);
4224 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4225 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
4226
4227 addr = txq_pcpu->tso_headers_dma +
4228 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4229 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
4230
4231 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
4232 MVPP2_TXD_F_DESC |
4233 MVPP2_TXD_PADDING_DISABLE);
4234 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4235 }
4236
4237 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
4238 struct net_device *dev, struct tso_t *tso,
4239 struct mvpp2_tx_queue *txq,
4240 struct mvpp2_tx_queue *aggr_txq,
4241 struct mvpp2_txq_pcpu *txq_pcpu,
4242 int sz, bool left, bool last)
4243 {
4244 struct mvpp2_port *port = netdev_priv(dev);
4245 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4246 dma_addr_t buf_dma_addr;
4247
4248 mvpp2_txdesc_clear_ptp(port, tx_desc);
4249 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4250 mvpp2_txdesc_size_set(port, tx_desc, sz);
4251
4252 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
4253 DMA_TO_DEVICE);
4254 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4255 mvpp2_txq_desc_put(txq);
4256 return -ENOMEM;
4257 }
4258
4259 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4260
4261 if (!left) {
4262 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
4263 if (last) {
4264 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4265 return 0;
4266 }
4267 } else {
4268 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
4269 }
4270
4271 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4272 return 0;
4273 }
4274
4275 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
4276 struct mvpp2_tx_queue *txq,
4277 struct mvpp2_tx_queue *aggr_txq,
4278 struct mvpp2_txq_pcpu *txq_pcpu)
4279 {
4280 struct mvpp2_port *port = netdev_priv(dev);
4281 int hdr_sz, i, len, descs = 0;
4282 struct tso_t tso;
4283
4284 /* Check the number of available descriptors */
4285 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
4286 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
4287 tso_count_descs(skb)))
4288 return 0;
4289
4290 hdr_sz = tso_start(skb, &tso);
4291
4292 len = skb->len - hdr_sz;
4293 while (len > 0) {
4294 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
4295 char *hdr = txq_pcpu->tso_headers +
4296 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4297
4298 len -= left;
4299 descs++;
4300
4301 tso_build_hdr(skb, hdr, &tso, left, len == 0);
4302 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
4303
4304 while (left > 0) {
4305 int sz = min_t(int, tso.size, left);
4306 left -= sz;
4307 descs++;
4308
4309 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
4310 txq_pcpu, sz, left, len == 0))
4311 goto release;
4312 tso_build_data(skb, &tso, sz);
4313 }
4314 }
4315
4316 return descs;
4317
4318 release:
4319 for (i = descs - 1; i >= 0; i--) {
4320 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
4321 tx_desc_unmap_put(port, txq, tx_desc);
4322 }
4323 return 0;
4324 }
4325
4326 /* Main Tx processing */
4327 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
4328 {
4329 struct mvpp2_port *port = netdev_priv(dev);
4330 struct mvpp2_tx_queue *txq, *aggr_txq;
4331 struct mvpp2_txq_pcpu *txq_pcpu;
4332 struct mvpp2_tx_desc *tx_desc;
4333 dma_addr_t buf_dma_addr;
4334 unsigned long flags = 0;
4335 unsigned int thread;
4336 int frags = 0;
4337 u16 txq_id;
4338 u32 tx_cmd;
4339
4340 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4341
4342 txq_id = skb_get_queue_mapping(skb);
4343 txq = port->txqs[txq_id];
4344 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4345 aggr_txq = &port->priv->aggr_txqs[thread];
4346
4347 if (test_bit(thread, &port->priv->lock_map))
4348 spin_lock_irqsave(&port->tx_lock[thread], flags);
4349
4350 if (skb_is_gso(skb)) {
4351 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
4352 goto out;
4353 }
4354 frags = skb_shinfo(skb)->nr_frags + 1;
4355
4356 /* Check the number of available descriptors */
4357 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
4358 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
4359 frags = 0;
4360 goto out;
4361 }
4362
4363 /* Get a descriptor for the first part of the packet */
4364 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4365 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
4366 !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
4367 mvpp2_txdesc_clear_ptp(port, tx_desc);
4368 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4369 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
4370
4371 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
4372 skb_headlen(skb), DMA_TO_DEVICE);
4373 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4374 mvpp2_txq_desc_put(txq);
4375 frags = 0;
4376 goto out;
4377 }
4378
4379 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4380
4381 tx_cmd = mvpp2_skb_tx_csum(port, skb);
4382
4383 if (frags == 1) {
4384 /* First and last descriptor */
4385 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
4386 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4387 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4388 } else {
4389 /* First but not last */
4390 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
4391 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4392 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4393
4394 /* Continue with the other skb fragments */
4395 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
4396 tx_desc_unmap_put(port, txq, tx_desc);
4397 frags = 0;
4398 }
4399 }
4400
4401 out:
4402 if (frags > 0) {
4403 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
4404 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
4405
4406 txq_pcpu->reserved_num -= frags;
4407 txq_pcpu->count += frags;
4408 aggr_txq->count += frags;
4409
4410 /* Make the descriptor updates visible before enabling transmit */
4411 wmb();
4412 mvpp2_aggr_txq_pend_desc_add(port, frags);
4413
4414 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
4415 netif_tx_stop_queue(nq);
4416
4417 u64_stats_update_begin(&stats->syncp);
4418 stats->tx_packets++;
4419 stats->tx_bytes += skb->len;
4420 u64_stats_update_end(&stats->syncp);
4421 } else {
4422 dev->stats.tx_dropped++;
4423 dev_kfree_skb_any(skb);
4424 }
4425
4426 /* Finalize Tx processing */
4427 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
4428 mvpp2_txq_done(port, txq, txq_pcpu);
4429
4430 /* Arm the tx-done timer if the remaining packets will not trigger the coalescing threshold */
4431 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
4432 txq_pcpu->count > 0) {
4433 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
4434
4435 if (!port_pcpu->timer_scheduled) {
4436 port_pcpu->timer_scheduled = true;
4437 hrtimer_start(&port_pcpu->tx_done_timer,
4438 MVPP2_TXDONE_HRTIMER_PERIOD_NS,
4439 HRTIMER_MODE_REL_PINNED_SOFT);
4440 }
4441 }
4442
4443 if (test_bit(thread, &port->priv->lock_map))
4444 spin_unlock_irqrestore(&port->tx_lock[thread], flags);
4445
4446 return NETDEV_TX_OK;
4447 }
4448
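/* Report the error conditions signalled by the misc cause bits */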
4449 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
4450 {
4451 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
4452 netdev_err(dev, "FCS error\n");
4453 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
4454 netdev_err(dev, "rx fifo overrun error\n");
4455 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
4456 netdev_err(dev, "tx fifo underrun error\n");
4457 }
4458
4459 static int mvpp2_poll(struct napi_struct *napi, int budget)
4460 {
4461 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
4462 int rx_done = 0;
4463 struct mvpp2_port *port = netdev_priv(napi->dev);
4464 struct mvpp2_queue_vector *qv;
4465 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4466
4467 qv = container_of(napi, struct mvpp2_queue_vector, napi);
4468
4469
4470 /* Read the Rx/Tx cause register for this port: the low bits flag Rx
4471  * queues with pending packets, the TXQ_OCCUP_DESC bits flag Tx queues
4472  * with completed descriptors, and the MISC bit signals error
4473  * interrupts. Each software thread has its own copy of this register.
4474  */
4479 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
4480 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4481
4482 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4483 if (cause_misc) {
4484 mvpp2_cause_error(port->dev, cause_misc);
4485
4486 /* Clear the cause register */
4487 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
4488 mvpp2_thread_write(port->priv, thread,
4489 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
4490 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
4491 }
4492
4493 if (port->has_tx_irqs) {
4494 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4495 if (cause_tx) {
4496 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
4497 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
4498 }
4499 }
4500
4501 /* Process Rx packets */
4502 cause_rx = cause_rx_tx &
4503 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
4504 cause_rx <<= qv->first_rxq;
4505 cause_rx |= qv->pending_cause_rx;
4506 while (cause_rx && budget > 0) {
4507 int count;
4508 struct mvpp2_rx_queue *rxq;
4509
4510 rxq = mvpp2_get_rx_queue(port, cause_rx);
4511 if (!rxq)
4512 break;
4513
4514 count = mvpp2_rx(port, napi, budget, rxq);
4515 rx_done += count;
4516 budget -= count;
4517 if (budget > 0) {
4518 /* Clear the bit associated with this Rx queue so that the
4519  * next iteration continues from the next Rx queue
4520  */
4522 cause_rx &= ~(1 << rxq->logic_rxq);
4523 }
4524 }
4525
4526 if (budget > 0) {
4527 cause_rx = 0;
4528 napi_complete_done(napi, rx_done);
4529
4530 mvpp2_qvec_interrupt_enable(qv);
4531 }
4532 qv->pending_cause_rx = cause_rx;
4533 return rx_done;
4534 }
4535
4536 static void mvpp22_mode_reconfigure(struct mvpp2_port *port,
4537 phy_interface_t interface)
4538 {
4539 u32 ctrl3;
4540
4541
4542 mvpp2_mac_reset_assert(port);
4543
4544
4545 mvpp22_pcs_reset_assert(port);
4546
4547
4548 mvpp22_comphy_init(port, interface);
4549
4550
4551 mvpp22_gop_init(port, interface);
4552
4553 mvpp22_pcs_reset_deassert(port, interface);
4554
4555 if (mvpp2_port_supports_xlg(port)) {
4556 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
4557 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4558
4559 if (mvpp2_is_xlg(interface))
4560 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4561 else
4562 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4563
4564 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
4565 }
4566
4567 if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface))
4568 mvpp2_xlg_max_rx_size_set(port);
4569 else
4570 mvpp2_gmac_max_rx_size_set(port);
4571 }
4572
4573 /* Set hw internals when starting the port */
4574 static void mvpp2_start_dev(struct mvpp2_port *port)
4575 {
4576 int i;
4577
4578 mvpp2_txp_max_tx_size_set(port);
4579
4580 for (i = 0; i < port->nqvecs; i++)
4581 napi_enable(&port->qvecs[i].napi);
4582
4583
4584 mvpp2_interrupts_enable(port);
4585
4586 if (port->priv->hw_version >= MVPP22)
4587 mvpp22_mode_reconfigure(port, port->phy_interface);
4588
4589 if (port->phylink) {
4590 phylink_start(port->phylink);
4591 } else {
4592 mvpp2_acpi_start(port);
4593 }
4594
4595 netif_tx_start_all_queues(port->dev);
4596
4597 clear_bit(0, &port->state);
4598 }
4599
4600 /* Set hw internals when stopping the port */
4601 static void mvpp2_stop_dev(struct mvpp2_port *port)
4602 {
4603 int i;
4604
4605 set_bit(0, &port->state);
4606
4607
4608 mvpp2_interrupts_disable(port);
4609
4610 for (i = 0; i < port->nqvecs; i++)
4611 napi_disable(&port->qvecs[i].napi);
4612
4613 if (port->phylink)
4614 phylink_stop(port->phylink);
4615 phy_power_off(port->comphy);
4616 }
4617
4618 static int mvpp2_check_ringparam_valid(struct net_device *dev,
4619 struct ethtool_ringparam *ring)
4620 {
4621 u16 new_rx_pending = ring->rx_pending;
4622 u16 new_tx_pending = ring->tx_pending;
4623
4624 if (ring->rx_pending == 0 || ring->tx_pending == 0)
4625 return -EINVAL;
4626
4627 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4628 new_rx_pending = MVPP2_MAX_RXD_MAX;
4629 else if (ring->rx_pending < MSS_THRESHOLD_START)
4630 new_rx_pending = MSS_THRESHOLD_START;
4631 else if (!IS_ALIGNED(ring->rx_pending, 16))
4632 new_rx_pending = ALIGN(ring->rx_pending, 16);
4633
4634 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4635 new_tx_pending = MVPP2_MAX_TXD_MAX;
4636 else if (!IS_ALIGNED(ring->tx_pending, 32))
4637 new_tx_pending = ALIGN(ring->tx_pending, 32);
4638
4639 /* The Tx ring size cannot be smaller than the minimum number of
4640  * descriptors needed for TSO
4641  */
4642 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4643 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
4644
4645 if (ring->rx_pending != new_rx_pending) {
4646 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
4647 ring->rx_pending, new_rx_pending);
4648 ring->rx_pending = new_rx_pending;
4649 }
4650
4651 if (ring->tx_pending != new_tx_pending) {
4652 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
4653 ring->tx_pending, new_tx_pending);
4654 ring->tx_pending = new_tx_pending;
4655 }
4656
4657 return 0;
4658 }
4659
4660 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4661 {
4662 u32 mac_addr_l, mac_addr_m, mac_addr_h;
4663
4664 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4665 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4666 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4667 addr[0] = (mac_addr_h >> 24) & 0xFF;
4668 addr[1] = (mac_addr_h >> 16) & 0xFF;
4669 addr[2] = (mac_addr_h >> 8) & 0xFF;
4670 addr[3] = mac_addr_h & 0xFF;
4671 addr[4] = mac_addr_m & 0xFF;
4672 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
4673 }
4674
4675 static int mvpp2_irqs_init(struct mvpp2_port *port)
4676 {
4677 int err, i;
4678
4679 for (i = 0; i < port->nqvecs; i++) {
4680 struct mvpp2_queue_vector *qv = port->qvecs + i;
4681
4682 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4683 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
4684 if (!qv->mask) {
4685 err = -ENOMEM;
4686 goto err;
4687 }
4688
4689 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4690 }
4691
4692 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
4693 if (err)
4694 goto err;
4695
4696 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4697 unsigned int cpu;
4698
4699 for_each_present_cpu(cpu) {
4700 if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4701 qv->sw_thread_id)
4702 cpumask_set_cpu(cpu, qv->mask);
4703 }
4704
4705 irq_set_affinity_hint(qv->irq, qv->mask);
4706 }
4707 }
4708
4709 return 0;
4710 err:
4711 for (i = 0; i < port->nqvecs; i++) {
4712 struct mvpp2_queue_vector *qv = port->qvecs + i;
4713
4714 irq_set_affinity_hint(qv->irq, NULL);
4715 kfree(qv->mask);
4716 qv->mask = NULL;
4717 free_irq(qv->irq, qv);
4718 }
4719
4720 return err;
4721 }
4722
4723 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
4724 {
4725 int i;
4726
4727 for (i = 0; i < port->nqvecs; i++) {
4728 struct mvpp2_queue_vector *qv = port->qvecs + i;
4729
4730 irq_set_affinity_hint(qv->irq, NULL);
4731 kfree(qv->mask);
4732 qv->mask = NULL;
4733 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
4734 free_irq(qv->irq, qv);
4735 }
4736 }
4737
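/* RSS is only available in multi-queue mode and not on loopback ports */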
4738 static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
4739 {
4740 return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
4741 !(port->flags & MVPP2_F_LOOPBACK);
4742 }
4743
4744 static int mvpp2_open(struct net_device *dev)
4745 {
4746 struct mvpp2_port *port = netdev_priv(dev);
4747 struct mvpp2 *priv = port->priv;
4748 unsigned char mac_bcast[ETH_ALEN] = {
4749 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4750 bool valid = false;
4751 int err;
4752
4753 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
4754 if (err) {
4755 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4756 return err;
4757 }
4758 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
4759 if (err) {
4760 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
4761 return err;
4762 }
4763 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
4764 if (err) {
4765 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
4766 return err;
4767 }
4768 err = mvpp2_prs_def_flow(port);
4769 if (err) {
4770 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
4771 return err;
4772 }
4773
4774 /* Allocate the Rx/Tx queues */
4775 err = mvpp2_setup_rxqs(port);
4776 if (err) {
4777 netdev_err(port->dev, "cannot allocate Rx queues\n");
4778 return err;
4779 }
4780
4781 err = mvpp2_setup_txqs(port);
4782 if (err) {
4783 netdev_err(port->dev, "cannot allocate Tx queues\n");
4784 goto err_cleanup_rxqs;
4785 }
4786
4787 err = mvpp2_irqs_init(port);
4788 if (err) {
4789 netdev_err(port->dev, "cannot init IRQs\n");
4790 goto err_cleanup_txqs;
4791 }
4792
4793 if (port->phylink) {
4794 err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
4795 if (err) {
4796 netdev_err(port->dev, "could not attach PHY (%d)\n",
4797 err);
4798 goto err_free_irq;
4799 }
4800
4801 valid = true;
4802 }
4803
4804 if (priv->hw_version >= MVPP22 && port->port_irq) {
4805 err = request_irq(port->port_irq, mvpp2_port_isr, 0,
4806 dev->name, port);
4807 if (err) {
4808 netdev_err(port->dev,
4809 "cannot request port link/ptp IRQ %d\n",
4810 port->port_irq);
4811 goto err_free_irq;
4812 }
4813
4814 mvpp22_gop_setup_irq(port);
4815
4816 /* The link is down until the link interrupt says otherwise */
4817 netif_carrier_off(port->dev);
4818
4819 valid = true;
4820 } else {
4821 port->port_irq = 0;
4822 }
4823
4824 if (!valid) {
4825 netdev_err(port->dev,
4826 "invalid configuration: no dt or link IRQ");
4827 err = -ENOENT;
4828 goto err_free_irq;
4829 }
4830
4831 /* Unmask interrupts on all CPUs */
4832 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
4833 mvpp2_shared_interrupt_mask_unmask(port, false);
4834
4835 mvpp2_start_dev(port);
4836
4837 /* Start the hardware statistics gathering work */
4838 queue_delayed_work(priv->stats_queue, &port->stats_work,
4839 MVPP2_MIB_COUNTERS_STATS_DELAY);
4840
4841 return 0;
4842
4843 err_free_irq:
4844 mvpp2_irqs_deinit(port);
4845 err_cleanup_txqs:
4846 mvpp2_cleanup_txqs(port);
4847 err_cleanup_rxqs:
4848 mvpp2_cleanup_rxqs(port);
4849 return err;
4850 }
4851
4852 static int mvpp2_stop(struct net_device *dev)
4853 {
4854 struct mvpp2_port *port = netdev_priv(dev);
4855 struct mvpp2_port_pcpu *port_pcpu;
4856 unsigned int thread;
4857
4858 mvpp2_stop_dev(port);
4859
4860 /* Mask interrupts on all CPUs */
4861 on_each_cpu(mvpp2_interrupts_mask, port, 1);
4862 mvpp2_shared_interrupt_mask_unmask(port, true);
4863
4864 if (port->phylink)
4865 phylink_disconnect_phy(port->phylink);
4866 if (port->port_irq)
4867 free_irq(port->port_irq, port);
4868
4869 mvpp2_irqs_deinit(port);
4870 if (!port->has_tx_irqs) {
4871 for (thread = 0; thread < port->priv->nthreads; thread++) {
4872 port_pcpu = per_cpu_ptr(port->pcpu, thread);
4873
4874 hrtimer_cancel(&port_pcpu->tx_done_timer);
4875 port_pcpu->timer_scheduled = false;
4876 }
4877 }
4878 mvpp2_cleanup_rxqs(port);
4879 mvpp2_cleanup_txqs(port);
4880
4881 cancel_delayed_work_sync(&port->stats_work);
4882
4883 mvpp2_mac_reset_assert(port);
4884 mvpp22_pcs_reset_assert(port);
4885
4886 return 0;
4887 }
4888
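/* Add every address of a netdev address list to the parser MAC filter */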
4889 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4890 struct netdev_hw_addr_list *list)
4891 {
4892 struct netdev_hw_addr *ha;
4893 int ret;
4894
4895 netdev_hw_addr_list_for_each(ha, list) {
4896 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4897 if (ret)
4898 return ret;
4899 }
4900
4901 return 0;
4902 }
4903
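/* Enable or disable unicast and multicast promiscuous mode, adjusting
 * VID filtering accordingly.
 */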
4904 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4905 {
4906 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4907 mvpp2_prs_vid_enable_filtering(port);
4908 else
4909 mvpp2_prs_vid_disable_filtering(port);
4910
4911 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4912 MVPP2_PRS_L2_UNI_CAST, enable);
4913
4914 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4915 MVPP2_PRS_L2_MULTI_CAST, enable);
4916 }
4917
4918 static void mvpp2_set_rx_mode(struct net_device *dev)
4919 {
4920 struct mvpp2_port *port = netdev_priv(dev);
4921
4922 /* Clear the whole UC and MC filter list */
4923 mvpp2_prs_mac_del_all(port);
4924
4925 if (dev->flags & IFF_PROMISC) {
4926 mvpp2_set_rx_promisc(port, true);
4927 return;
4928 }
4929
4930 mvpp2_set_rx_promisc(port, false);
4931
4932 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4933 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4934 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4935 MVPP2_PRS_L2_UNI_CAST, true);
4936
4937 if (dev->flags & IFF_ALLMULTI) {
4938 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4939 MVPP2_PRS_L2_MULTI_CAST, true);
4940 return;
4941 }
4942
4943 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4944 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4945 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4946 MVPP2_PRS_L2_MULTI_CAST, true);
4947 }
4948
4949 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4950 {
4951 const struct sockaddr *addr = p;
4952 int err;
4953
4954 if (!is_valid_ether_addr(addr->sa_data))
4955 return -EADDRNOTAVAIL;
4956
4957 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4958 if (err) {
4959 /* Reconfigure the parser to accept the original MAC address */
4960 mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4961 netdev_err(dev, "failed to change MAC address\n");
4962 }
4963 return err;
4964 }
4965
4966 /* Shut down all the ports, reconfigure the buffer pools as per-cpu
4967  * or shared, then bring the ports back up
4968  */
4969 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
4970 {
4971 bool change_percpu = (percpu != priv->percpu_pools);
4972 int numbufs = MVPP2_BM_POOLS_NUM, i;
4973 struct mvpp2_port *port = NULL;
4974 bool status[MVPP2_MAX_PORTS];
4975
4976 for (i = 0; i < priv->port_count; i++) {
4977 port = priv->port_list[i];
4978 status[i] = netif_running(port->dev);
4979 if (status[i])
4980 mvpp2_stop(port->dev);
4981 }
4982
4983 /* With per-cpu pools there are two pools per Rx queue */
4984 if (priv->percpu_pools)
4985 numbufs = port->nrxqs * 2;
4986
4987 if (change_percpu)
4988 mvpp2_bm_pool_update_priv_fc(priv, false);
4989
4990 for (i = 0; i < numbufs; i++)
4991 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
4992
4993 devm_kfree(port->dev->dev.parent, priv->bm_pools);
4994 priv->percpu_pools = percpu;
4995 mvpp2_bm_init(port->dev->dev.parent, priv);
4996
4997 for (i = 0; i < priv->port_count; i++) {
4998 port = priv->port_list[i];
4999 mvpp2_swf_bm_pool_init(port);
5000 if (status[i])
5001 mvpp2_open(port->dev);
5002 }
5003
5004 if (change_percpu)
5005 mvpp2_bm_pool_update_priv_fc(priv, true);
5006
5007 return 0;
5008 }
5009
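/* Change the port MTU, switching between shared and per-cpu buffer pools
 * when crossing the jumbo-frame threshold.
 */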
5010 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5011 {
5012 struct mvpp2_port *port = netdev_priv(dev);
5013 bool running = netif_running(dev);
5014 struct mvpp2 *priv = port->priv;
5015 int err;
5016
5017 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5018 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5019 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5020 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5021 }
5022
5023 if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
5024 netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
5025 mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
5026 return -EINVAL;
5027 }
5028
5029 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
5030 if (priv->percpu_pools) {
5031 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
5032 mvpp2_bm_switch_buffers(priv, false);
5033 }
5034 } else {
5035 bool jumbo = false;
5036 int i;
5037
5038 for (i = 0; i < priv->port_count; i++)
5039 if (priv->port_list[i] != port &&
5040 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
5041 MVPP2_BM_LONG_PKT_SIZE) {
5042 jumbo = true;
5043 break;
5044 }
5045
5046 /* No port is using jumbo frames */
5047 if (!jumbo) {
5048 dev_info(port->dev->dev.parent,
5049 "all ports have a low MTU, switching to per-cpu buffers");
5050 mvpp2_bm_switch_buffers(priv, true);
5051 }
5052 }
5053
5054 if (running)
5055 mvpp2_stop_dev(port);
5056
5057 err = mvpp2_bm_update_mtu(dev, mtu);
5058 if (err) {
5059 netdev_err(dev, "failed to change MTU\n");
5060 /* Reconfigure the BM to the previous MTU */
5061 mvpp2_bm_update_mtu(dev, dev->mtu);
5062 } else {
5063 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5064 }
5065
5066 if (running) {
5067 mvpp2_start_dev(port);
5068 mvpp2_egress_enable(port);
5069 mvpp2_ingress_enable(port);
5070 }
5071
5072 return err;
5073 }
5074
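/* Make sure the page-pool DMA direction matches the XDP state of the
 * ports: bidirectional when any port runs an XDP program, otherwise
 * from-device only.
 */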
5075 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
5076 {
5077 enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
5078 struct mvpp2 *priv = port->priv;
5079 int err = -1, i;
5080
5081 if (!priv->percpu_pools)
5082 return err;
5083
5084 if (!priv->page_pool[0])
5085 return -ENOMEM;
5086
5087 for (i = 0; i < priv->port_count; i++) {
5088 port = priv->port_list[i];
5089 if (port->xdp_prog) {
5090 dma_dir = DMA_BIDIRECTIONAL;
5091 break;
5092 }
5093 }
5094
5095 /* All pools share the same DMA direction, so checking the first one is enough */
5096 if (priv->page_pool[0]->p.dma_dir != dma_dir)
5097 err = mvpp2_bm_switch_buffers(priv, true);
5098
5099 return err;
5100 }
5101
5102 static void
5103 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5104 {
5105 struct mvpp2_port *port = netdev_priv(dev);
5106 unsigned int start;
5107 unsigned int cpu;
5108
5109 for_each_possible_cpu(cpu) {
5110 struct mvpp2_pcpu_stats *cpu_stats;
5111 u64 rx_packets;
5112 u64 rx_bytes;
5113 u64 tx_packets;
5114 u64 tx_bytes;
5115
5116 cpu_stats = per_cpu_ptr(port->stats, cpu);
5117 do {
5118 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5119 rx_packets = cpu_stats->rx_packets;
5120 rx_bytes = cpu_stats->rx_bytes;
5121 tx_packets = cpu_stats->tx_packets;
5122 tx_bytes = cpu_stats->tx_bytes;
5123 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5124
5125 stats->rx_packets += rx_packets;
5126 stats->rx_bytes += rx_bytes;
5127 stats->tx_packets += tx_packets;
5128 stats->tx_bytes += tx_bytes;
5129 }
5130
5131 stats->rx_errors = dev->stats.rx_errors;
5132 stats->rx_dropped = dev->stats.rx_dropped;
5133 stats->tx_dropped = dev->stats.tx_dropped;
5134 }
5135
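/* Apply a SIOCSHWTSTAMP request: program the TSU and the PTP interrupt
 * masks for the requested Tx/Rx timestamping configuration.
 */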
5136 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5137 {
5138 struct hwtstamp_config config;
5139 void __iomem *ptp;
5140 u32 gcr, int_mask;
5141
5142 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5143 return -EFAULT;
5144
5145 if (config.tx_type != HWTSTAMP_TX_OFF &&
5146 config.tx_type != HWTSTAMP_TX_ON)
5147 return -ERANGE;
5148
5149 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
5150
5151 int_mask = gcr = 0;
5152 if (config.tx_type != HWTSTAMP_TX_OFF) {
5153 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
5154 int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
5155 MVPP22_PTP_INT_MASK_QUEUE0;
5156 }
5157
5158 /* The Tx reset also has to be released when enabling the TSU for Rx timestamping */
5159 if (config.rx_filter != HWTSTAMP_FILTER_NONE)
5160 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
5161 MVPP22_PTP_GCR_TX_RESET;
5162
5163 if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
5164 mvpp22_tai_start(port->priv->tai);
5165
5166 if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
5167 config.rx_filter = HWTSTAMP_FILTER_ALL;
5168 mvpp2_modify(ptp + MVPP22_PTP_GCR,
5169 MVPP22_PTP_GCR_RX_RESET |
5170 MVPP22_PTP_GCR_TX_RESET |
5171 MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5172 port->rx_hwtstamp = true;
5173 } else {
5174 port->rx_hwtstamp = false;
5175 mvpp2_modify(ptp + MVPP22_PTP_GCR,
5176 MVPP22_PTP_GCR_RX_RESET |
5177 MVPP22_PTP_GCR_TX_RESET |
5178 MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5179 }
5180
5181 mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
5182 MVPP22_PTP_INT_MASK_QUEUE1 |
5183 MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
5184
5185 if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
5186 mvpp22_tai_stop(port->priv->tai);
5187
5188 port->tx_hwtstamp_type = config.tx_type;
5189
5190 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5191 return -EFAULT;
5192
5193 return 0;
5194 }
5195
5196 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5197 {
5198 struct hwtstamp_config config;
5199
5200 memset(&config, 0, sizeof(config));
5201
5202 config.tx_type = port->tx_hwtstamp_type;
5203 config.rx_filter = port->rx_hwtstamp ?
5204 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
5205
5206 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5207 return -EFAULT;
5208
5209 return 0;
5210 }
5211
5212 static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
5213 struct ethtool_ts_info *info)
5214 {
5215 struct mvpp2_port *port = netdev_priv(dev);
5216
5217 if (!port->hwtstamp)
5218 return -EOPNOTSUPP;
5219
5220 info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
5221 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5222 SOF_TIMESTAMPING_RX_SOFTWARE |
5223 SOF_TIMESTAMPING_SOFTWARE |
5224 SOF_TIMESTAMPING_TX_HARDWARE |
5225 SOF_TIMESTAMPING_RX_HARDWARE |
5226 SOF_TIMESTAMPING_RAW_HARDWARE;
5227 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
5228 BIT(HWTSTAMP_TX_ON);
5229 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
5230 BIT(HWTSTAMP_FILTER_ALL);
5231
5232 return 0;
5233 }
5234
5235 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5236 {
5237 struct mvpp2_port *port = netdev_priv(dev);
5238
5239 switch (cmd) {
5240 case SIOCSHWTSTAMP:
5241 if (port->hwtstamp)
5242 return mvpp2_set_ts_config(port, ifr);
5243 break;
5244
5245 case SIOCGHWTSTAMP:
5246 if (port->hwtstamp)
5247 return mvpp2_get_ts_config(port, ifr);
5248 break;
5249 }
5250
5251 if (!port->phylink)
5252 return -ENOTSUPP;
5253
5254 return phylink_mii_ioctl(port->phylink, ifr, cmd);
5255 }
5256
5257 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
5258 {
5259 struct mvpp2_port *port = netdev_priv(dev);
5260 int ret;
5261
5262 ret = mvpp2_prs_vid_entry_add(port, vid);
5263 if (ret)
5264 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
5265 MVPP2_PRS_VLAN_FILT_MAX - 1);
5266 return ret;
5267 }
5268
5269 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
5270 {
5271 struct mvpp2_port *port = netdev_priv(dev);
5272
5273 mvpp2_prs_vid_entry_remove(port, vid);
5274 return 0;
5275 }
5276
5277 static int mvpp2_set_features(struct net_device *dev,
5278 netdev_features_t features)
5279 {
5280 netdev_features_t changed = dev->features ^ features;
5281 struct mvpp2_port *port = netdev_priv(dev);
5282
5283 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
5284 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
5285 mvpp2_prs_vid_enable_filtering(port);
5286 } else {
5287 /* Invalidate all the VID filters registered for this
5288  * port
5289  */
5290 mvpp2_prs_vid_remove_all(port);
5291
5292 mvpp2_prs_vid_disable_filtering(port);
5293 }
5294 }
5295
5296 if (changed & NETIF_F_RXHASH) {
5297 if (features & NETIF_F_RXHASH)
5298 mvpp22_port_rss_enable(port);
5299 else
5300 mvpp22_port_rss_disable(port);
5301 }
5302
5303 return 0;
5304 }
5305
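/* Attach or detach an XDP program, validating the MTU, buffer pool mode
 * and Tx queue count first.
 */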
5306 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
5307 {
5308 struct bpf_prog *prog = bpf->prog, *old_prog;
5309 bool running = netif_running(port->dev);
5310 bool reset = !prog != !port->xdp_prog;
5311
5312 if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
5313 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
5314 return -EOPNOTSUPP;
5315 }
5316
5317 if (!port->priv->percpu_pools) {
5318 NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
5319 return -EOPNOTSUPP;
5320 }
5321
5322 if (port->ntxqs < num_possible_cpus() * 2) {
5323 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
5324 return -EOPNOTSUPP;
5325 }
5326
5327 /* The device is up and the program presence changes: the queues must be reconfigured */
5328 if (running && reset)
5329 mvpp2_stop(port->dev);
5330
5331 old_prog = xchg(&port->xdp_prog, prog);
5332 if (old_prog)
5333 bpf_prog_put(old_prog);
5334
5335 /* The program was only replaced; the queues and MTU are already set up */
5336 if (!reset)
5337 return 0;
5338
5339 /* The device was up, bring the link back */
5340 if (running)
5341 mvpp2_open(port->dev);
5342
5343
5344 mvpp2_check_pagepool_dma(port);
5345
5346 return 0;
5347 }
5348
5349 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5350 {
5351 struct mvpp2_port *port = netdev_priv(dev);
5352
5353 switch (xdp->command) {
5354 case XDP_SETUP_PROG:
5355 return mvpp2_xdp_setup(port, xdp);
5356 default:
5357 return -EINVAL;
5358 }
5359 }
5360
5361
5362 /* Ethtool methods */
5363 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
5364 {
5365 struct mvpp2_port *port = netdev_priv(dev);
5366
5367 if (!port->phylink)
5368 return -ENOTSUPP;
5369
5370 return phylink_ethtool_nway_reset(port->phylink);
5371 }
5372
5373
5374 static int
5375 mvpp2_ethtool_set_coalesce(struct net_device *dev,
5376 struct ethtool_coalesce *c,
5377 struct kernel_ethtool_coalesce *kernel_coal,
5378 struct netlink_ext_ack *extack)
5379 {
5380 struct mvpp2_port *port = netdev_priv(dev);
5381 int queue;
5382
5383 for (queue = 0; queue < port->nrxqs; queue++) {
5384 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5385
5386 rxq->time_coal = c->rx_coalesce_usecs;
5387 rxq->pkts_coal = c->rx_max_coalesced_frames;
5388 mvpp2_rx_pkts_coal_set(port, rxq);
5389 mvpp2_rx_time_coal_set(port, rxq);
5390 }
5391
5392 if (port->has_tx_irqs) {
5393 port->tx_time_coal = c->tx_coalesce_usecs;
5394 mvpp2_tx_time_coal_set(port);
5395 }
5396
5397 for (queue = 0; queue < port->ntxqs; queue++) {
5398 struct mvpp2_tx_queue *txq = port->txqs[queue];
5399
5400 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5401
5402 if (port->has_tx_irqs)
5403 mvpp2_tx_pkts_coal_set(port, txq);
5404 }
5405
5406 return 0;
5407 }
5408
5409
5410 static int
5411 mvpp2_ethtool_get_coalesce(struct net_device *dev,
5412 struct ethtool_coalesce *c,
5413 struct kernel_ethtool_coalesce *kernel_coal,
5414 struct netlink_ext_ack *extack)
5415 {
5416 struct mvpp2_port *port = netdev_priv(dev);
5417
5418 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5419 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5420 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5421 c->tx_coalesce_usecs = port->tx_time_coal;
5422 return 0;
5423 }
5424
5425 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5426 struct ethtool_drvinfo *drvinfo)
5427 {
5428 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5429 sizeof(drvinfo->driver));
5430 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5431 sizeof(drvinfo->version));
5432 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5433 sizeof(drvinfo->bus_info));
5434 }
5435
5436 static void
5437 mvpp2_ethtool_get_ringparam(struct net_device *dev,
5438 struct ethtool_ringparam *ring,
5439 struct kernel_ethtool_ringparam *kernel_ring,
5440 struct netlink_ext_ack *extack)
5441 {
5442 struct mvpp2_port *port = netdev_priv(dev);
5443
5444 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
5445 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
5446 ring->rx_pending = port->rx_ring_size;
5447 ring->tx_pending = port->tx_ring_size;
5448 }
5449
5450 static int
5451 mvpp2_ethtool_set_ringparam(struct net_device *dev,
5452 struct ethtool_ringparam *ring,
5453 struct kernel_ethtool_ringparam *kernel_ring,
5454 struct netlink_ext_ack *extack)
5455 {
5456 struct mvpp2_port *port = netdev_priv(dev);
5457 u16 prev_rx_ring_size = port->rx_ring_size;
5458 u16 prev_tx_ring_size = port->tx_ring_size;
5459 int err;
5460
5461 err = mvpp2_check_ringparam_valid(dev, ring);
5462 if (err)
5463 return err;
5464
5465 if (!netif_running(dev)) {
5466 port->rx_ring_size = ring->rx_pending;
5467 port->tx_ring_size = ring->tx_pending;
5468 return 0;
5469 }
5470
5471 /* The interface is running, so a full reallocation of the
5472  * queues is needed to apply the new ring sizes
5473  */
5474 mvpp2_stop_dev(port);
5475 mvpp2_cleanup_rxqs(port);
5476 mvpp2_cleanup_txqs(port);
5477
5478 port->rx_ring_size = ring->rx_pending;
5479 port->tx_ring_size = ring->tx_pending;
5480
5481 err = mvpp2_setup_rxqs(port);
5482 if (err) {
5483 /* Reallocate the Rx queues with the previous ring size */
5484 port->rx_ring_size = prev_rx_ring_size;
5485 ring->rx_pending = prev_rx_ring_size;
5486 err = mvpp2_setup_rxqs(port);
5487 if (err)
5488 goto err_out;
5489 }
5490 err = mvpp2_setup_txqs(port);
5491 if (err) {
5492 /* Reallocate the Tx queues with the previous ring size */
5493 port->tx_ring_size = prev_tx_ring_size;
5494 ring->tx_pending = prev_tx_ring_size;
5495 err = mvpp2_setup_txqs(port);
5496 if (err)
5497 goto err_clean_rxqs;
5498 }
5499
5500 mvpp2_start_dev(port);
5501 mvpp2_egress_enable(port);
5502 mvpp2_ingress_enable(port);
5503
5504 return 0;
5505
5506 err_clean_rxqs:
5507 mvpp2_cleanup_rxqs(port);
5508 err_out:
5509 netdev_err(dev, "failed to change ring parameters");
5510 return err;
5511 }
5512
5513 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
5514 struct ethtool_pauseparam *pause)
5515 {
5516 struct mvpp2_port *port = netdev_priv(dev);
5517
5518 if (!port->phylink)
5519 return;
5520
5521 phylink_ethtool_get_pauseparam(port->phylink, pause);
5522 }
5523
5524 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
5525 struct ethtool_pauseparam *pause)
5526 {
5527 struct mvpp2_port *port = netdev_priv(dev);
5528
5529 if (!port->phylink)
5530 return -ENOTSUPP;
5531
5532 return phylink_ethtool_set_pauseparam(port->phylink, pause);
5533 }
5534
5535 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
5536 struct ethtool_link_ksettings *cmd)
5537 {
5538 struct mvpp2_port *port = netdev_priv(dev);
5539
5540 if (!port->phylink)
5541 return -ENOTSUPP;
5542
5543 return phylink_ethtool_ksettings_get(port->phylink, cmd);
5544 }
5545
5546 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
5547 const struct ethtool_link_ksettings *cmd)
5548 {
5549 struct mvpp2_port *port = netdev_priv(dev);
5550
5551 if (!port->phylink)
5552 return -ENOTSUPP;
5553
5554 return phylink_ethtool_ksettings_set(port->phylink, cmd);
5555 }
5556
5557 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
5558 struct ethtool_rxnfc *info, u32 *rules)
5559 {
5560 struct mvpp2_port *port = netdev_priv(dev);
5561 int ret = 0, i, loc = 0;
5562
5563 if (!mvpp22_rss_is_supported(port))
5564 return -EOPNOTSUPP;
5565
5566 switch (info->cmd) {
5567 case ETHTOOL_GRXFH:
5568 ret = mvpp2_ethtool_rxfh_get(port, info);
5569 break;
5570 case ETHTOOL_GRXRINGS:
5571 info->data = port->nrxqs;
5572 break;
5573 case ETHTOOL_GRXCLSRLCNT:
5574 info->rule_cnt = port->n_rfs_rules;
5575 break;
5576 case ETHTOOL_GRXCLSRULE:
5577 ret = mvpp2_ethtool_cls_rule_get(port, info);
5578 break;
5579 case ETHTOOL_GRXCLSRLALL:
5580 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
5581 if (port->rfs_rules[i])
5582 rules[loc++] = i;
5583 }
5584 break;
5585 default:
5586 return -ENOTSUPP;
5587 }
5588
5589 return ret;
5590 }
5591
5592 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
5593 struct ethtool_rxnfc *info)
5594 {
5595 struct mvpp2_port *port = netdev_priv(dev);
5596 int ret = 0;
5597
5598 if (!mvpp22_rss_is_supported(port))
5599 return -EOPNOTSUPP;
5600
5601 switch (info->cmd) {
5602 case ETHTOOL_SRXFH:
5603 ret = mvpp2_ethtool_rxfh_set(port, info);
5604 break;
5605 case ETHTOOL_SRXCLSRLINS:
5606 ret = mvpp2_ethtool_cls_rule_ins(port, info);
5607 break;
5608 case ETHTOOL_SRXCLSRLDEL:
5609 ret = mvpp2_ethtool_cls_rule_del(port, info);
5610 break;
5611 default:
5612 return -EOPNOTSUPP;
5613 }
5614 return ret;
5615 }
5616
5617 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
5618 {
5619 struct mvpp2_port *port = netdev_priv(dev);
5620
5621 return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
5622 }
5623
5624 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
5625 u8 *hfunc)
5626 {
5627 struct mvpp2_port *port = netdev_priv(dev);
5628 int ret = 0;
5629
5630 if (!mvpp22_rss_is_supported(port))
5631 return -EOPNOTSUPP;
5632
5633 if (indir)
5634 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
5635
5636 if (hfunc)
5637 *hfunc = ETH_RSS_HASH_CRC32;
5638
5639 return ret;
5640 }
5641
5642 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
5643 const u8 *key, const u8 hfunc)
5644 {
5645 struct mvpp2_port *port = netdev_priv(dev);
5646 int ret = 0;
5647
5648 if (!mvpp22_rss_is_supported(port))
5649 return -EOPNOTSUPP;
5650
5651 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5652 return -EOPNOTSUPP;
5653
5654 if (key)
5655 return -EOPNOTSUPP;
5656
5657 if (indir)
5658 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
5659
5660 return ret;
5661 }
5662
5663 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
5664 u8 *key, u8 *hfunc, u32 rss_context)
5665 {
5666 struct mvpp2_port *port = netdev_priv(dev);
5667 int ret = 0;
5668
5669 if (!mvpp22_rss_is_supported(port))
5670 return -EOPNOTSUPP;
5671 if (rss_context >= MVPP22_N_RSS_TABLES)
5672 return -EINVAL;
5673
5674 if (hfunc)
5675 *hfunc = ETH_RSS_HASH_CRC32;
5676
5677 if (indir)
5678 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
5679
5680 return ret;
5681 }
5682
5683 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
5684 const u32 *indir, const u8 *key,
5685 const u8 hfunc, u32 *rss_context,
5686 bool delete)
5687 {
5688 struct mvpp2_port *port = netdev_priv(dev);
5689 int ret;
5690
5691 if (!mvpp22_rss_is_supported(port))
5692 return -EOPNOTSUPP;
5693
5694 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5695 return -EOPNOTSUPP;
5696
5697 if (key)
5698 return -EOPNOTSUPP;
5699
5700 if (delete)
5701 return mvpp22_port_rss_ctx_delete(port, *rss_context);
5702
5703 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
5704 ret = mvpp22_port_rss_ctx_create(port, rss_context);
5705 if (ret)
5706 return ret;
5707 }
5708
5709 return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
5710 }
5711
5712
5713 static const struct net_device_ops mvpp2_netdev_ops = {
5714 .ndo_open = mvpp2_open,
5715 .ndo_stop = mvpp2_stop,
5716 .ndo_start_xmit = mvpp2_tx,
5717 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5718 .ndo_set_mac_address = mvpp2_set_mac_address,
5719 .ndo_change_mtu = mvpp2_change_mtu,
5720 .ndo_get_stats64 = mvpp2_get_stats64,
5721 .ndo_eth_ioctl = mvpp2_ioctl,
5722 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
5723 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
5724 .ndo_set_features = mvpp2_set_features,
5725 .ndo_bpf = mvpp2_xdp,
5726 .ndo_xdp_xmit = mvpp2_xdp_xmit,
5727 };
5728
5729 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5730 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5731 ETHTOOL_COALESCE_MAX_FRAMES,
5732 .nway_reset = mvpp2_ethtool_nway_reset,
5733 .get_link = ethtool_op_get_link,
5734 .get_ts_info = mvpp2_ethtool_get_ts_info,
5735 .set_coalesce = mvpp2_ethtool_set_coalesce,
5736 .get_coalesce = mvpp2_ethtool_get_coalesce,
5737 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5738 .get_ringparam = mvpp2_ethtool_get_ringparam,
5739 .set_ringparam = mvpp2_ethtool_set_ringparam,
5740 .get_strings = mvpp2_ethtool_get_strings,
5741 .get_ethtool_stats = mvpp2_ethtool_get_stats,
5742 .get_sset_count = mvpp2_ethtool_get_sset_count,
5743 .get_pauseparam = mvpp2_ethtool_get_pause_param,
5744 .set_pauseparam = mvpp2_ethtool_set_pause_param,
5745 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
5746 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
5747 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
5748 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
5749 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
5750 .get_rxfh = mvpp2_ethtool_get_rxfh,
5751 .set_rxfh = mvpp2_ethtool_set_rxfh,
5752 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
5753 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
5754 };
5755
5756
5757 /* Single (shared) interrupt case: one queue vector handles all the
5758  * Rx queues of the port */
5759 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5760 struct device_node *port_node)
5761 {
5762 struct mvpp2_queue_vector *v = &port->qvecs[0];
5763
5764 v->first_rxq = 0;
5765 v->nrxqs = port->nrxqs;
5766 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5767 v->sw_thread_id = 0;
5768 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5769 v->port = port;
5770 v->irq = irq_of_parse_and_map(port_node, 0);
5771 if (v->irq <= 0)
5772 return -EINVAL;
5773 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5774 NAPI_POLL_WEIGHT);
5775
5776 port->nqvecs = 1;
5777
5778 return 0;
5779 }
5780
5781 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5782 struct device_node *port_node)
5783 {
5784 struct mvpp2 *priv = port->priv;
5785 struct mvpp2_queue_vector *v;
5786 int i, ret;
5787
5788 switch (queue_mode) {
5789 case MVPP2_QDIST_SINGLE_MODE:
5790 port->nqvecs = priv->nthreads + 1;
5791 break;
5792 case MVPP2_QDIST_MULTI_MODE:
5793 port->nqvecs = priv->nthreads;
5794 break;
5795 }
5796
5797 for (i = 0; i < port->nqvecs; i++) {
5798 char irqname[16];
5799
5800 v = port->qvecs + i;
5801
5802 v->port = port;
5803 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5804 v->sw_thread_id = i;
5805 v->sw_thread_mask = BIT(i);
5806
5807 if (port->flags & MVPP2_F_DT_COMPAT)
5808 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5809 else
5810 snprintf(irqname, sizeof(irqname), "hif%d", i);
5811
5812 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
5813 v->first_rxq = i;
5814 v->nrxqs = 1;
5815 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5816 i == (port->nqvecs - 1)) {
5817 v->first_rxq = 0;
5818 v->nrxqs = port->nrxqs;
5819 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5820
5821 if (port->flags & MVPP2_F_DT_COMPAT)
5822 strncpy(irqname, "rx-shared", sizeof(irqname));
5823 }
5824
5825 if (port_node)
5826 v->irq = of_irq_get_byname(port_node, irqname);
5827 else
5828 v->irq = fwnode_irq_get(port->fwnode, i);
5829 if (v->irq <= 0) {
5830 ret = -EINVAL;
5831 goto err;
5832 }
5833
5834 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5835 NAPI_POLL_WEIGHT);
5836 }
5837
5838 return 0;
5839
5840 err:
5841 for (i = 0; i < port->nqvecs; i++)
5842 irq_dispose_mapping(port->qvecs[i].irq);
5843 return ret;
5844 }
5845
5846 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5847 struct device_node *port_node)
5848 {
5849 if (port->has_tx_irqs)
5850 return mvpp2_multi_queue_vectors_init(port, port_node);
5851 else
5852 return mvpp2_simple_queue_vectors_init(port, port_node);
5853 }
5854
5855 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5856 {
5857 int i;
5858
5859 for (i = 0; i < port->nqvecs; i++)
5860 irq_dispose_mapping(port->qvecs[i].irq);
5861 }
5862
5863 /* Configure the Rx queue group interrupts for this port */
5864 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5865 {
5866 struct mvpp2 *priv = port->priv;
5867 u32 val;
5868 int i;
5869
5870 if (priv->hw_version == MVPP21) {
5871 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5872 port->nrxqs);
5873 return;
5874 }
5875
5876 /* PPv2.2 and later: map each queue vector's Rx queues to its software thread */
5877 for (i = 0; i < port->nqvecs; i++) {
5878 struct mvpp2_queue_vector *qv = port->qvecs + i;
5879
5880 if (!qv->nrxqs)
5881 continue;
5882
5883 val = qv->sw_thread_id;
5884 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5885 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5886
5887 val = qv->first_rxq;
5888 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5889 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5890 }
5891 }
5892
5893
5894 static int mvpp2_port_init(struct mvpp2_port *port)
5895 {
5896 struct device *dev = port->dev->dev.parent;
5897 struct mvpp2 *priv = port->priv;
5898 struct mvpp2_txq_pcpu *txq_pcpu;
5899 unsigned int thread;
5900 int queue, err, val;
5901
5902 /* Check hardware constraints */
5903 if (port->first_rxq + port->nrxqs >
5904 MVPP2_MAX_PORTS * priv->max_port_rxqs)
5905 return -EINVAL;
5906
5907 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5908 return -EINVAL;
5909
5910
5911 mvpp2_egress_disable(port);
5912 mvpp2_port_disable(port);
5913
5914 if (mvpp2_is_xlg(port->phy_interface)) {
5915 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5916 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5917 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5918 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5919 } else {
5920 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5921 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5922 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5923 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5924 }
5925
5926 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
5927
5928 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
5929 GFP_KERNEL);
5930 if (!port->txqs)
5931 return -ENOMEM;
5932
5933 /* Associate physical Tx queues to this port and initialize;
5934  * the mapping is predefined
5935  */
5936 for (queue = 0; queue < port->ntxqs; queue++) {
5937 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5938 struct mvpp2_tx_queue *txq;
5939
5940 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5941 if (!txq) {
5942 err = -ENOMEM;
5943 goto err_free_percpu;
5944 }
5945
5946 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5947 if (!txq->pcpu) {
5948 err = -ENOMEM;
5949 goto err_free_percpu;
5950 }
5951
5952 txq->id = queue_phy_id;
5953 txq->log_id = queue;
5954 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5955 for (thread = 0; thread < priv->nthreads; thread++) {
5956 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5957 txq_pcpu->thread = thread;
5958 }
5959
5960 port->txqs[queue] = txq;
5961 }
5962
5963 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
5964 GFP_KERNEL);
5965 if (!port->rxqs) {
5966 err = -ENOMEM;
5967 goto err_free_percpu;
5968 }
5969
5970 /* Allocate and initialize the Rx queues for this port */
5971 for (queue = 0; queue < port->nrxqs; queue++) {
5972 struct mvpp2_rx_queue *rxq;
5973
5974 /* Map this physical Rx queue to the port's logical Rx queue */
5975 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5976 if (!rxq) {
5977 err = -ENOMEM;
5978 goto err_free_percpu;
5979 }
5980
5981 rxq->id = port->first_rxq + queue;
5982 rxq->port = port->id;
5983 rxq->logic_rxq = queue;
5984
5985 port->rxqs[queue] = rxq;
5986 }
5987
5988 mvpp2_rx_irqs_setup(port);
5989
5990
5991 for (queue = 0; queue < port->nrxqs; queue++) {
5992 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5993
5994 rxq->size = port->rx_ring_size;
5995 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5996 rxq->time_coal = MVPP2_RX_COAL_USEC;
5997 }
5998
5999 mvpp2_ingress_disable(port);
6000
6001
6002 mvpp2_defaults_set(port);
6003
6004
6005 mvpp2_cls_oversize_rxq_set(port);
6006 mvpp2_cls_port_config(port);
6007
6008 if (mvpp22_rss_is_supported(port))
6009 mvpp22_port_rss_init(port);
6010
6011
6012 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6013
6014
6015 err = mvpp2_swf_bm_pool_init(port);
6016 if (err)
6017 goto err_free_percpu;
6018
6019 /* Snapshot the hardware counters and clear the cached ethtool statistics */
6020 mvpp2_read_stats(port);
6021 memset(port->ethtool_stats, 0,
6022 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
6023
6024 return 0;
6025
6026 err_free_percpu:
6027 for (queue = 0; queue < port->ntxqs; queue++) {
6028 if (!port->txqs[queue])
6029 continue;
6030 free_percpu(port->txqs[queue]->pcpu);
6031 }
6032 return err;
6033 }
6034
6035 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
6036 unsigned long *flags)
6037 {
6038 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
6039 "tx-cpu3" };
6040 int i;
6041
6042 for (i = 0; i < 5; i++)
6043 if (of_property_match_string(port_node, "interrupt-names",
6044 irqs[i]) < 0)
6045 return false;
6046
6047 *flags |= MVPP2_F_DT_COMPAT;
6048 return true;
6049 }
6050
6051
6052 /* Check whether the port description provides the per-thread Tx/Rx
6053  * interrupts: PPv2.1 never has them; on PPv2.2 and later the old DT
6054  * bindings name them "rx-shared"/"tx-cpuX" while the new ones use
6055  * "hifX". Both variants are accepted for backward compatibility.
6056  */
6059 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
6060 struct device_node *port_node,
6061 unsigned long *flags)
6062 {
6063 char name[5];
6064 int i;
6065
6066 /* No DT node (ACPI probing): assume the interrupts are provided */
6067 if (!port_node)
6068 return true;
6069
6070 if (priv->hw_version == MVPP21)
6071 return false;
6072
6073 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
6074 return true;
6075
6076 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6077 snprintf(name, 5, "hif%d", i);
6078 if (of_property_match_string(port_node, "interrupt-names",
6079 name) < 0)
6080 return false;
6081 }
6082
6083 return true;
6084 }
6085
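/* Pick the port MAC address: the firmware node property first, then the
 * address already programmed in hardware (PPv2.1 only), falling back to
 * a random address.
 */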
6086 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
6087 struct fwnode_handle *fwnode,
6088 char **mac_from)
6089 {
6090 struct mvpp2_port *port = netdev_priv(dev);
6091 char hw_mac_addr[ETH_ALEN] = {0};
6092 char fw_mac_addr[ETH_ALEN];
6093
6094 if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
6095 *mac_from = "firmware node";
6096 eth_hw_addr_set(dev, fw_mac_addr);
6097 return;
6098 }
6099
6100 if (priv->hw_version == MVPP21) {
6101 mvpp21_get_mac_address(port, hw_mac_addr);
6102 if (is_valid_ether_addr(hw_mac_addr)) {
6103 *mac_from = "hardware";
6104 eth_hw_addr_set(dev, hw_mac_addr);
6105 return;
6106 }
6107 }
6108
6109 *mac_from = "random";
6110 eth_hw_addr_random(dev);
6111 }
6112
6113 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
6114 {
6115 return container_of(config, struct mvpp2_port, phylink_config);
6116 }
6117
6118 static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
6119 {
6120 return container_of(pcs, struct mvpp2_port, pcs_xlg);
6121 }
6122
6123 static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
6124 {
6125 return container_of(pcs, struct mvpp2_port, pcs_gmac);
6126 }
6127
6128 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
6129 struct phylink_link_state *state)
6130 {
6131 struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
6132 u32 val;
6133
6134 if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
6135 state->speed = SPEED_5000;
6136 else
6137 state->speed = SPEED_10000;
6138 state->duplex = 1;
6139 state->an_complete = 1;
6140
6141 val = readl(port->base + MVPP22_XLG_STATUS);
6142 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
6143
6144 state->pause = 0;
6145 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6146 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
6147 state->pause |= MLO_PAUSE_TX;
6148 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
6149 state->pause |= MLO_PAUSE_RX;
6150 }
6151
6152 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
6153 unsigned int mode,
6154 phy_interface_t interface,
6155 const unsigned long *advertising,
6156 bool permit_pause_to_mac)
6157 {
6158 return 0;
6159 }
6160
6161 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
6162 .pcs_get_state = mvpp2_xlg_pcs_get_state,
6163 .pcs_config = mvpp2_xlg_pcs_config,
6164 };
6165
6166 static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
6167 unsigned long *supported,
6168 const struct phylink_link_state *state)
6169 {
6170 /* In 802.3z mode (1000BASE-X/2500BASE-X) the hardware requires
6171  * in-band autonegotiation to be enabled, so reject configurations
6172  * that disable it
6173  */
6174 if (phy_interface_mode_is_8023z(state->interface) &&
6175 !phylink_test(state->advertising, Autoneg))
6176 return -EINVAL;
6177
6178 return 0;
6179 }
6180
6181 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
6182 struct phylink_link_state *state)
6183 {
6184 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6185 u32 val;
6186
6187 val = readl(port->base + MVPP2_GMAC_STATUS0);
6188
6189 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
6190 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
6191 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
6192
6193 switch (port->phy_interface) {
6194 case PHY_INTERFACE_MODE_1000BASEX:
6195 state->speed = SPEED_1000;
6196 break;
6197 case PHY_INTERFACE_MODE_2500BASEX:
6198 state->speed = SPEED_2500;
6199 break;
6200 default:
6201 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
6202 state->speed = SPEED_1000;
6203 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
6204 state->speed = SPEED_100;
6205 else
6206 state->speed = SPEED_10;
6207 }
6208
6209 state->pause = 0;
6210 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
6211 state->pause |= MLO_PAUSE_RX;
6212 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
6213 state->pause |= MLO_PAUSE_TX;
6214 }
6215
6216 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
6217 phy_interface_t interface,
6218 const unsigned long *advertising,
6219 bool permit_pause_to_mac)
6220 {
6221 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6222 u32 mask, val, an, old_an, changed;
6223
6224 mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
6225 MVPP2_GMAC_IN_BAND_AUTONEG |
6226 MVPP2_GMAC_AN_SPEED_EN |
6227 MVPP2_GMAC_FLOW_CTRL_AUTONEG |
6228 MVPP2_GMAC_AN_DUPLEX_EN;
6229
6230 if (phylink_autoneg_inband(mode)) {
6231 mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
6232 MVPP2_GMAC_CONFIG_GMII_SPEED |
6233 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6234 val = MVPP2_GMAC_IN_BAND_AUTONEG;
6235
6236 if (interface == PHY_INTERFACE_MODE_SGMII) {
6237 /* SGMII in-band AN resolves the speed and duplex from the PHY */
6238 val |= MVPP2_GMAC_AN_SPEED_EN |
6239 MVPP2_GMAC_AN_DUPLEX_EN;
6240 } else {
6241 /* 802.3z links run at a fixed speed in full duplex */
6242 val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
6243 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6244
6245 /* Only let the MAC apply the negotiated flow control when
6246  * phylink permits pause resolution towards the MAC
6247  */
6249 if (permit_pause_to_mac)
6250 val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
6251
6252 /* Manually set the pause advertisement */
6253 mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
6254 if (phylink_test(advertising, Pause))
6255 val |= MVPP2_GMAC_FC_ADV_EN;
6256 if (phylink_test(advertising, Asym_Pause))
6257 val |= MVPP2_GMAC_FC_ADV_ASM_EN;
6258 }
6259 } else {
6260 val = 0;
6261 }
6262
6263 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6264 an = (an & ~mask) | val;
6265 changed = an ^ old_an;
6266 if (changed)
6267 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6268
6269 /* Only a change in the advertisement bits is of interest to the caller */
6270 return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
6271 }
6272
6273 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
6274 {
6275 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6276 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6277
6278 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
6279 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6280 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
6281 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6282 }
6283
6284 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
6285 .pcs_validate = mvpp2_gmac_pcs_validate,
6286 .pcs_get_state = mvpp2_gmac_pcs_get_state,
6287 .pcs_config = mvpp2_gmac_pcs_config,
6288 .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
6289 };
6290
6291 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
6292 const struct phylink_link_state *state)
6293 {
6294 u32 val;
6295
6296 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6297 MVPP22_XLG_CTRL0_MAC_RESET_DIS,
6298 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
6299 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
6300 MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
6301 MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
6302 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
6303 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
6304
6305 /* Wait for the MAC to come out of reset */
6306 do {
6307 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6308 } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
6309 }
6310
6311 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
6312 const struct phylink_link_state *state)
6313 {
6314 u32 old_ctrl0, ctrl0;
6315 u32 old_ctrl2, ctrl2;
6316 u32 old_ctrl4, ctrl4;
6317
6318 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
6319 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
6320 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
6321
6322 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
6323 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
6324
6325 /* Configure the port type and datapath for the selected interface mode */
6326 if (phy_interface_mode_is_8023z(state->interface)) {
6327 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
6328 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6329 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6330 MVPP22_CTRL4_DP_CLK_SEL |
6331 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6332 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6333 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
6334 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6335 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6336 MVPP22_CTRL4_DP_CLK_SEL |
6337 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6338 } else if (phy_interface_mode_is_rgmii(state->interface)) {
6339 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
6340 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
6341 MVPP22_CTRL4_SYNC_BYPASS_DIS |
6342 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6343 }
6344
6345 /* Configure negotiation style */
6346 if (!phylink_autoneg_inband(mode)) {
6347 /* PHY or fixed link - no in-band AN: speed, duplex and pause are
6348  * programmed by mac_link_up(), so nothing to do here.
6349  */
6350 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6351 /* SGMII in-band mode: the link state is taken from the in-band
6352  * control word received from the PHY. */
6353 } else if (phy_interface_mode_is_8023z(state->interface)) {
6354 /* 1000BASE-X and 2500BASE-X cannot negotiate speed or duplex: they
6355  * always run at a fixed speed in full duplex, so mark the port as a
6356  * 1000BASE-X port type.
6357  */
6358
6359 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
6360 }
6361
6362 if (old_ctrl0 != ctrl0)
6363 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
6364 if (old_ctrl2 != ctrl2)
6365 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
6366 if (old_ctrl4 != ctrl4)
6367 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
6368 }
6369
6370 static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
6371 phy_interface_t interface)
6372 {
6373 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6374
6375 /* Select the PCS matching the interface mode: the XLG PCS handles
6376  * the 10G-class serdes modes, the GMAC PCS handles everything
6377  * slower.
6378  */
6379 if (mvpp2_is_xlg(interface))
6380 return &port->pcs_xlg;
6381 else
6382 return &port->pcs_gmac;
6383 }
6384
6385 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
6386 phy_interface_t interface)
6387 {
6388 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6389
6390 /* Only GoP port 0 has an XLG MAC, so reject XLG modes elsewhere */
6391 if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
6392 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6393 return -EINVAL;
6394 }
6395
6396 if (port->phy_interface != interface ||
6397 phylink_autoneg_inband(mode)) {
6398 /* Force the link down while the interface mode or in-band settings
6399  * are changed, so the link does not glitch during reconfiguration;
6400  * mac_finish() and mac_link_up() lift the forced-down state again.
6401  */
6402
6403
6404 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6405 MVPP2_GMAC_FORCE_LINK_PASS |
6406 MVPP2_GMAC_FORCE_LINK_DOWN,
6407 MVPP2_GMAC_FORCE_LINK_DOWN);
6408
6409 if (mvpp2_port_supports_xlg(port))
6410 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6411 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6412 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
6413 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
6414 }
6415
6416 /* Make sure the port is disabled while the mode is reconfigured */
6417 mvpp2_port_disable(port);
6418
6419 if (port->phy_interface != interface) {
6420 /* Place the GMAC into reset while the interface mode changes */
6421 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6422 MVPP2_GMAC_PORT_RESET_MASK,
6423 MVPP2_GMAC_PORT_RESET_MASK);
6424
6425 if (port->priv->hw_version >= MVPP22) {
6426 mvpp22_gop_mask_irq(port);
6427
6428 phy_power_off(port->comphy);
6429 
6430 /* Reconfigure the serdes lanes for the new interface mode */
6431 mvpp22_mode_reconfigure(port, interface);
6432 }
6433 }
6434
6435 return 0;
6436 }
6437
6438 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6439 const struct phylink_link_state *state)
6440 {
6441 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6442
6443
6444 if (mvpp2_is_xlg(state->interface))
6445 mvpp2_xlg_config(port, mode, state);
6446 else if (phy_interface_mode_is_rgmii(state->interface) ||
6447 phy_interface_mode_is_8023z(state->interface) ||
6448 state->interface == PHY_INTERFACE_MODE_SGMII)
6449 mvpp2_gmac_config(port, mode, state);
6450
6451 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
6452 mvpp2_port_loopback_set(port, state);
6453 }
6454
6455 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
6456 phy_interface_t interface)
6457 {
6458 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6459
6460 if (port->priv->hw_version >= MVPP22 &&
6461 port->phy_interface != interface) {
6462 port->phy_interface = interface;
6463
6464 /* Re-enable the GoP link interrupts masked in mac_prepare() */
6465 mvpp22_gop_unmask_irq(port);
6466 }
6467
6468 if (!mvpp2_is_xlg(interface)) {
6469 /* Release the GMAC reset and wait for it to complete */
6470 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6471 MVPP2_GMAC_PORT_RESET_MASK, 0);
6472
6473 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
6474 MVPP2_GMAC_PORT_RESET_MASK)
6475 continue;
6476 }
6477
6478 mvpp2_port_enable(port);
6479
6480 /* Allow the link to come up if in in-band mode; otherwise the link
6481  * is forced through mac_link_up()/mac_link_down().
6482  */
6483 if (phylink_autoneg_inband(mode)) {
6484 if (mvpp2_is_xlg(interface))
6485 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6486 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6487 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
6488 else
6489 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6490 MVPP2_GMAC_FORCE_LINK_PASS |
6491 MVPP2_GMAC_FORCE_LINK_DOWN, 0);
6492 }
6493
6494 return 0;
6495 }
6496
6497 static void mvpp2_mac_link_up(struct phylink_config *config,
6498 struct phy_device *phy,
6499 unsigned int mode, phy_interface_t interface,
6500 int speed, int duplex,
6501 bool tx_pause, bool rx_pause)
6502 {
6503 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6504 u32 val;
6505 int i;
6506
6507 if (mvpp2_is_xlg(interface)) {
6508 if (!phylink_autoneg_inband(mode)) {
6509 val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6510 if (tx_pause)
6511 val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
6512 if (rx_pause)
6513 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
6514
6515 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6516 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
6517 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6518 MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
6519 MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
6520 }
6521 } else {
6522 if (!phylink_autoneg_inband(mode)) {
6523 val = MVPP2_GMAC_FORCE_LINK_PASS;
6524
6525 if (speed == SPEED_1000 || speed == SPEED_2500)
6526 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6527 else if (speed == SPEED_100)
6528 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6529
6530 if (duplex == DUPLEX_FULL)
6531 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6532
6533 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6534 MVPP2_GMAC_FORCE_LINK_DOWN |
6535 MVPP2_GMAC_FORCE_LINK_PASS |
6536 MVPP2_GMAC_CONFIG_MII_SPEED |
6537 MVPP2_GMAC_CONFIG_GMII_SPEED |
6538 MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
6539 }
6540
6541 /* The pause enable bits in CTRL4 can always be updated: they only
6542  * take effect when flow-control autonegotiation
6543  * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
6544  */
6545 val = 0;
6546 if (tx_pause)
6547 val |= MVPP22_CTRL4_TX_FC_EN;
6548 if (rx_pause)
6549 val |= MVPP22_CTRL4_RX_FC_EN;
6550
6551 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
6552 MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
6553 val);
6554 }
6555
6556 if (port->priv->global_tx_fc) {
6557 port->tx_fc = tx_pause;
6558 if (tx_pause)
6559 mvpp2_rxq_enable_fc(port);
6560 else
6561 mvpp2_rxq_disable_fc(port);
6562 if (port->priv->percpu_pools) {
6563 for (i = 0; i < port->nrxqs; i++)
6564 mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
6565 } else {
6566 mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
6567 mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
6568 }
6569 if (port->priv->hw_version == MVPP23)
6570 mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
6571 }
6572
6573 mvpp2_port_enable(port);
6574
6575 mvpp2_egress_enable(port);
6576 mvpp2_ingress_enable(port);
6577 netif_tx_wake_all_queues(port->dev);
6578 }
6579
6580 static void mvpp2_mac_link_down(struct phylink_config *config,
6581 unsigned int mode, phy_interface_t interface)
6582 {
6583 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6584 u32 val;
6585
6586 if (!phylink_autoneg_inband(mode)) {
6587 if (mvpp2_is_xlg(interface)) {
6588 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6589 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6590 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
6591 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
6592 } else {
6593 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6594 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
6595 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
6596 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6597 }
6598 }
6599
6600 netif_tx_stop_all_queues(port->dev);
6601 mvpp2_egress_disable(port);
6602 mvpp2_ingress_disable(port);
6603
6604 mvpp2_port_disable(port);
6605 }
6606
6607 static const struct phylink_mac_ops mvpp2_phylink_ops = {
6608 .validate = phylink_generic_validate,
6609 .mac_select_pcs = mvpp2_select_pcs,
6610 .mac_prepare = mvpp2_mac_prepare,
6611 .mac_config = mvpp2_mac_config,
6612 .mac_finish = mvpp2_mac_finish,
6613 .mac_link_up = mvpp2_mac_link_up,
6614 .mac_link_down = mvpp2_mac_link_down,
6615 };
6616
6617
6618 static void mvpp2_acpi_start(struct mvpp2_port *port)
6619 {
6620 /* Phylink is not used for ACPI ports, so the MAC has to be brought
6621  * up by hand when the interface starts; this mirrors the sequence
6622  * phylink would otherwise drive through the callbacks above.
6623  */
6624 struct phylink_link_state state = {
6625 .interface = port->phy_interface,
6626 };
6627 struct phylink_pcs *pcs;
6628
6629 pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);
6630
6631 mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
6632 port->phy_interface);
6633 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
6634 pcs->ops->pcs_config(pcs, MLO_AN_INBAND, port->phy_interface,
6635 state.advertising, false);
6636 mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
6637 port->phy_interface);
6638 mvpp2_mac_link_up(&port->phylink_config, NULL,
6639 MLO_AN_INBAND, port->phy_interface,
6640 SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
6641 }
6642
6643 /* For backward compatibility with existing ACPI tables, check whether
6644  * the port node carries the properties phylink needs; if not, fall
6645  * back to the ACPI compat (phylink-less) mode. */
6646 static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
6647 {
6648 if (!is_acpi_node(port_fwnode))
6649 return false;
6650
6651 return (!fwnode_property_present(port_fwnode, "phy-handle") &&
6652 !fwnode_property_present(port_fwnode, "managed") &&
6653 !fwnode_get_named_child_node(port_fwnode, "fixed-link"));
6654 }
6655
6656
6657 static int mvpp2_port_probe(struct platform_device *pdev,
6658 struct fwnode_handle *port_fwnode,
6659 struct mvpp2 *priv)
6660 {
6661 struct phy *comphy = NULL;
6662 struct mvpp2_port *port;
6663 struct mvpp2_port_pcpu *port_pcpu;
6664 struct device_node *port_node = to_of_node(port_fwnode);
6665 netdev_features_t features;
6666 struct net_device *dev;
6667 struct phylink *phylink;
6668 char *mac_from = "";
6669 unsigned int ntxqs, nrxqs, thread;
6670 unsigned long flags = 0;
6671 bool has_tx_irqs;
6672 u32 id;
6673 int phy_mode;
6674 int err, i;
6675
6676 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
6677 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
6678 dev_err(&pdev->dev,
6679 "not enough IRQs to support multi queue mode\n");
6680 return -EINVAL;
6681 }
6682
6683 ntxqs = MVPP2_MAX_TXQ;
6684 nrxqs = mvpp2_get_nrxqs(priv);
6685
6686 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
6687 if (!dev)
6688 return -ENOMEM;
6689
6690 phy_mode = fwnode_get_phy_mode(port_fwnode);
6691 if (phy_mode < 0) {
6692 dev_err(&pdev->dev, "incorrect phy mode\n");
6693 err = phy_mode;
6694 goto err_free_netdev;
6695 }
6696 /* Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing
6697  * device trees: no backplane negotiation is performed, so the link
6698  * really operates in 10GBASE-R mode.
6699  */
6700
6701
6702
6703 if (phy_mode == PHY_INTERFACE_MODE_10GKR)
6704 phy_mode = PHY_INTERFACE_MODE_10GBASER;
6705
6706 if (port_node) {
6707 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
6708 if (IS_ERR(comphy)) {
6709 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
6710 err = -EPROBE_DEFER;
6711 goto err_free_netdev;
6712 }
6713 comphy = NULL;
6714 }
6715 }
6716
6717 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
6718 err = -EINVAL;
6719 dev_err(&pdev->dev, "missing port-id value\n");
6720 goto err_free_netdev;
6721 }
6722
6723 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
6724 dev->watchdog_timeo = 5 * HZ;
6725 dev->netdev_ops = &mvpp2_netdev_ops;
6726 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6727
6728 port = netdev_priv(dev);
6729 port->dev = dev;
6730 port->fwnode = port_fwnode;
6731 port->ntxqs = ntxqs;
6732 port->nrxqs = nrxqs;
6733 port->priv = priv;
6734 port->has_tx_irqs = has_tx_irqs;
6735 port->flags = flags;
6736
6737 err = mvpp2_queue_vectors_init(port, port_node);
6738 if (err)
6739 goto err_free_netdev;
6740
6741 if (port_node)
6742 port->port_irq = of_irq_get_byname(port_node, "link");
6743 else
6744 port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
6745 if (port->port_irq == -EPROBE_DEFER) {
6746 err = -EPROBE_DEFER;
6747 goto err_deinit_qvecs;
6748 }
6749 if (port->port_irq <= 0)
6750 /* the link IRQ is optional */
6751 port->port_irq = 0;
6752
6753 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
6754 port->flags |= MVPP2_F_LOOPBACK;
6755
6756 port->id = id;
6757 if (priv->hw_version == MVPP21)
6758 port->first_rxq = port->id * port->nrxqs;
6759 else
6760 port->first_rxq = port->id * priv->max_port_rxqs;
6761
6762 port->of_node = port_node;
6763 port->phy_interface = phy_mode;
6764 port->comphy = comphy;
6765
6766 if (priv->hw_version == MVPP21) {
6767 port->base = devm_platform_ioremap_resource(pdev, 2 + id);
6768 if (IS_ERR(port->base)) {
6769 err = PTR_ERR(port->base);
6770 goto err_free_irq;
6771 }
6772
6773 port->stats_base = port->priv->lms_base +
6774 MVPP21_MIB_COUNTERS_OFFSET +
6775 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
6776 } else {
6777 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
6778 &port->gop_id)) {
6779 err = -EINVAL;
6780 dev_err(&pdev->dev, "missing gop-port-id value\n");
6781 goto err_deinit_qvecs;
6782 }
6783
6784 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6785 port->stats_base = port->priv->iface_base +
6786 MVPP22_MIB_COUNTERS_OFFSET +
6787 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
6788
6789 /* Hardware timestamping can be offered whenever the TAI block was
6790  * probed successfully.
6791  */
6792 if (priv->tai)
6793 port->hwtstamp = true;
6794 }
6795
6796
6797 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6798 if (!port->stats) {
6799 err = -ENOMEM;
6800 goto err_free_irq;
6801 }
6802
6803 port->ethtool_stats = devm_kcalloc(&pdev->dev,
6804 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
6805 sizeof(u64), GFP_KERNEL);
6806 if (!port->ethtool_stats) {
6807 err = -ENOMEM;
6808 goto err_free_stats;
6809 }
6810
6811 mutex_init(&port->gather_stats_lock);
6812 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
6813
6814 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
6815
6816 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
6817 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
6818 SET_NETDEV_DEV(dev, &pdev->dev);
6819
6820 err = mvpp2_port_init(port);
6821 if (err < 0) {
6822 dev_err(&pdev->dev, "failed to init port %d\n", id);
6823 goto err_free_stats;
6824 }
6825
6826 mvpp2_port_periodic_xon_disable(port);
6827
6828 mvpp2_mac_reset_assert(port);
6829 mvpp22_pcs_reset_assert(port);
6830
6831 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6832 if (!port->pcpu) {
6833 err = -ENOMEM;
6834 goto err_free_txq_pcpu;
6835 }
6836
6837 if (!port->has_tx_irqs) {
6838 for (thread = 0; thread < priv->nthreads; thread++) {
6839 port_pcpu = per_cpu_ptr(port->pcpu, thread);
6840
6841 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6842 HRTIMER_MODE_REL_PINNED_SOFT);
6843 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6844 port_pcpu->timer_scheduled = false;
6845 port_pcpu->dev = dev;
6846 }
6847 }
6848
6849 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6850 NETIF_F_TSO;
6851 dev->features = features | NETIF_F_RXCSUM;
6852 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
6853 NETIF_F_HW_VLAN_CTAG_FILTER;
6854
6855 if (mvpp22_rss_is_supported(port)) {
6856 dev->hw_features |= NETIF_F_RXHASH;
6857 dev->features |= NETIF_F_NTUPLE;
6858 }
6859
6860 if (!port->priv->percpu_pools)
6861 mvpp2_set_hw_csum(port, port->pool_long->id);
6862
6863 dev->vlan_features |= features;
6864 netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
6865 dev->priv_flags |= IFF_UNICAST_FLT;
6866
6867 /* MTU range: standard Ethernet minimum up to the jumbo pool packet size */
6868 dev->min_mtu = ETH_MIN_MTU;
6869 /* The maximum MTU is bounded by the jumbo BM pool packet size */
6870 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
6871 dev->dev.of_node = port_node;
6872
6873 port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
6874 port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
6875
6876 if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
6877 port->phylink_config.dev = &dev->dev;
6878 port->phylink_config.type = PHYLINK_NETDEV;
6879 port->phylink_config.mac_capabilities =
6880 MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
6881
6882 if (port->priv->global_tx_fc)
6883 port->phylink_config.mac_capabilities |=
6884 MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
6885
6886 if (mvpp2_port_supports_xlg(port)) {
6887 /* With a COMPHY the port can be switched between any of the
6888  * supported 10G-class serdes modes at runtime.
6889  */
6890 if (comphy) {
6891 __set_bit(PHY_INTERFACE_MODE_5GBASER,
6892 port->phylink_config.supported_interfaces);
6893 __set_bit(PHY_INTERFACE_MODE_10GBASER,
6894 port->phylink_config.supported_interfaces);
6895 __set_bit(PHY_INTERFACE_MODE_XAUI,
6896 port->phylink_config.supported_interfaces);
6897 } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
6898 __set_bit(PHY_INTERFACE_MODE_5GBASER,
6899 port->phylink_config.supported_interfaces);
6900 } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
6901 __set_bit(PHY_INTERFACE_MODE_10GBASER,
6902 port->phylink_config.supported_interfaces);
6903 } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
6904 __set_bit(PHY_INTERFACE_MODE_XAUI,
6905 port->phylink_config.supported_interfaces);
6906 }
6907
6908 if (comphy)
6909 port->phylink_config.mac_capabilities |=
6910 MAC_10000FD | MAC_5000FD;
6911 else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
6912 port->phylink_config.mac_capabilities |=
6913 MAC_5000FD;
6914 else
6915 port->phylink_config.mac_capabilities |=
6916 MAC_10000FD;
6917 }
6918
6919 if (mvpp2_port_supports_rgmii(port))
6920 phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
6921
6922 if (comphy) {
6923 /* With a COMPHY the GMAC side can switch between SGMII,
6924  * 1000BASE-X and 2500BASE-X at runtime.
6925  */
6926 __set_bit(PHY_INTERFACE_MODE_SGMII,
6927 port->phylink_config.supported_interfaces);
6928 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
6929 port->phylink_config.supported_interfaces);
6930 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
6931 port->phylink_config.supported_interfaces);
6932 } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
6933 /* No COMPHY: only the pre-configured 2500BASE-X mode is usable */
6934 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
6935 port->phylink_config.supported_interfaces);
6936 } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
6937 phy_mode == PHY_INTERFACE_MODE_SGMII) {
6938 /* No COMPHY: 1000BASE-X and SGMII use the same serdes settings,
6939  * so the port can switch between the two. */
6940 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
6941 port->phylink_config.supported_interfaces);
6942 __set_bit(PHY_INTERFACE_MODE_SGMII,
6943 port->phylink_config.supported_interfaces);
6944 }
6945
6946 phylink = phylink_create(&port->phylink_config, port_fwnode,
6947 phy_mode, &mvpp2_phylink_ops);
6948 if (IS_ERR(phylink)) {
6949 err = PTR_ERR(phylink);
6950 goto err_free_port_pcpu;
6951 }
6952 port->phylink = phylink;
6953 } else {
6954 dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
6955 port->phylink = NULL;
6956 }
6957
6958 /* Initialise the COMPHY for the current interface mode, then power
6959  * it back off so the serdes lane stays in a low-power state until
6960  * the port is brought up.
6961  */
6962 if (port->comphy) {
6963 err = mvpp22_comphy_init(port, port->phy_interface);
6964 if (err == 0)
6965 phy_power_off(port->comphy);
6966 }
6967
6968 err = register_netdev(dev);
6969 if (err < 0) {
6970 dev_err(&pdev->dev, "failed to register netdev\n");
6971 goto err_phylink;
6972 }
6973 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6974
6975 priv->port_list[priv->port_count++] = port;
6976
6977 return 0;
6978
6979 err_phylink:
6980 if (port->phylink)
6981 phylink_destroy(port->phylink);
6982 err_free_port_pcpu:
6983 free_percpu(port->pcpu);
6984 err_free_txq_pcpu:
6985 for (i = 0; i < port->ntxqs; i++)
6986 free_percpu(port->txqs[i]->pcpu);
6987 err_free_stats:
6988 free_percpu(port->stats);
6989 err_free_irq:
6990 if (port->port_irq)
6991 irq_dispose_mapping(port->port_irq);
6992 err_deinit_qvecs:
6993 mvpp2_queue_vectors_deinit(port);
6994 err_free_netdev:
6995 free_netdev(dev);
6996 return err;
6997 }
6998
6999
7000 static void mvpp2_port_remove(struct mvpp2_port *port)
7001 {
7002 int i;
7003
7004 unregister_netdev(port->dev);
7005 if (port->phylink)
7006 phylink_destroy(port->phylink);
7007 free_percpu(port->pcpu);
7008 free_percpu(port->stats);
7009 for (i = 0; i < port->ntxqs; i++)
7010 free_percpu(port->txqs[i]->pcpu);
7011 mvpp2_queue_vectors_deinit(port);
7012 if (port->port_irq)
7013 irq_dispose_mapping(port->port_irq);
7014 free_netdev(port->dev);
7015 }
7016
7017
7018 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7019 struct mvpp2 *priv)
7020 {
7021 u32 win_enable;
7022 int i;
7023
7024 for (i = 0; i < 6; i++) {
7025 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7026 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7027
7028 if (i < 4)
7029 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7030 }
7031
7032 win_enable = 0;
7033
7034 for (i = 0; i < dram->num_cs; i++) {
7035 const struct mbus_dram_window *cs = dram->cs + i;
7036
7037 mvpp2_write(priv, MVPP2_WIN_BASE(i),
7038 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7039 dram->mbus_dram_target_id);
7040
7041 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7042 (cs->size - 1) & 0xffff0000);
7043
7044 win_enable |= (1 << i);
7045 }
7046
7047 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
7048 }
7049
7050
7051 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7052 {
7053 int port;
7054
7055 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7056 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7057 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7058 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7059 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
7060 }
7061
7062 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7063 MVPP2_RX_FIFO_PORT_MIN_PKT);
7064 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7065 }
7066
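/* Set the RX data FIFO size for one port; the attribute FIFO size is derived from it */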
7067 static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
7068 {
7069 int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
7070
7071 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
7072 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
7073 }
7074
7075 /* Initialize the RX FIFOs: 4kB of the data FIFO is reserved for the
7076  * loopback port and the remaining 44kB is split between the active
7077  * ports, guaranteeing at least 32kB for port 0 (10G capable) and 8kB
7078  * for port 1 (2.5G capable).
7079  */
7080
7081 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
7082 {
7083 int remaining_ports_count;
7084 unsigned long port_map;
7085 int size_remainder;
7086 int port, size;
7087
7088 /* The loopback port always gets a fixed 4kB of RX FIFO space */
7089 mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
7090 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7091 port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
7092
7093
7094 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
7095 mvpp22_rx_fifo_set_hw(priv, port, 0);
7096
7097
7098 size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
7099 remaining_ports_count = hweight_long(port_map);
7100
7101 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
7102 if (remaining_ports_count == 1)
7103 size = size_remainder;
7104 else if (port == 0)
7105 size = max(size_remainder / remaining_ports_count,
7106 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
7107 else if (port == 1)
7108 size = max(size_remainder / remaining_ports_count,
7109 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
7110 else
7111 size = size_remainder / remaining_ports_count;
7112
7113 size_remainder -= size;
7114 remaining_ports_count--;
7115
7116 mvpp22_rx_fifo_set_hw(priv, port, size);
7117 }
7118
7119 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7120 MVPP2_RX_FIFO_PORT_MIN_PKT);
7121 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7122 }
7123
7124 /* Configure the per-port RX FIFO flow-control thresholds (PPv2.3) */
7125 static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
7126 {
7127 int port, val;
7128
7129 /* Each port gets a threshold scaled to its maximum link speed: port 0
7130  * (up to 10G) the largest, port 1 (up to 5G) a medium one and the
7131  * remaining 1G-capable ports the smallest. The values are written in
7132  * MVPP2_RX_FC_TRSH_UNIT units.
7133  */
7134
7135
7136
7137
7138 for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
7139 if (port == 0) {
7140 val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7141 << MVPP2_RX_FC_TRSH_OFFS;
7142 val &= MVPP2_RX_FC_TRSH_MASK;
7143 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7144 } else if (port == 1) {
7145 val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7146 << MVPP2_RX_FC_TRSH_OFFS;
7147 val &= MVPP2_RX_FC_TRSH_MASK;
7148 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7149 } else {
7150 val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7151 << MVPP2_RX_FC_TRSH_OFFS;
7152 val &= MVPP2_RX_FC_TRSH_MASK;
7153 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7154 }
7155 }
7156 }
7157
7158 /* Enable or disable RX FIFO flow control for a port (PPv2.3) */
7159 void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
7160 {
7161 int val;
7162
7163 val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
7164
7165 if (en)
7166 val |= MVPP2_RX_FC_EN;
7167 else
7168 val &= ~MVPP2_RX_FC_EN;
7169
7170 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7171 }
7172
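/* Program the TX FIFO size and the matching minimum-transmit threshold for one port */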
7173 static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
7174 {
7175 int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
7176
7177 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
7178 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
7179 }
7180
7181 /* Initialize the TX FIFOs: 1kB of the data FIFO is reserved for the
7182  * loopback port and the remaining 18kB is split between the active
7183  * ports, with the 10G-capable port 0 getting the maximum per-port
7184  * size of 10kB.
7185  */
7186
7187 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
7188 {
7189 int remaining_ports_count;
7190 unsigned long port_map;
7191 int size_remainder;
7192 int port, size;
7193
7194 /* The loopback port always gets a fixed 1kB of TX FIFO space */
7195 mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
7196 MVPP22_TX_FIFO_DATA_SIZE_1KB);
7197 port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
7198
7199
7200 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
7201 mvpp22_tx_fifo_set_hw(priv, port, 0);
7202
7203
7204 size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
7205 remaining_ports_count = hweight_long(port_map);
7206
7207 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
7208 if (remaining_ports_count == 1)
7209 size = min(size_remainder,
7210 MVPP22_TX_FIFO_DATA_SIZE_10KB);
7211 else if (port == 0)
7212 size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
7213 else
7214 size = size_remainder / remaining_ports_count;
7215
7216 size_remainder -= size;
7217 remaining_ports_count--;
7218
7219 mvpp22_tx_fifo_set_hw(priv, port, size);
7220 }
7221 }
7222
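/* Configure the AXI bridge read/write attributes (cacheability and coherency domain) */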
7223 static void mvpp2_axi_init(struct mvpp2 *priv)
7224 {
7225 u32 val, rdval, wrval;
7226
7227 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
7228
7229 /* AXI Bridge Configuration */
7230
7231 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
7232 << MVPP22_AXI_ATTR_CACHE_OFFS;
7233 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7234 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7235
7236 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
7237 << MVPP22_AXI_ATTR_CACHE_OFFS;
7238 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7239 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7240
7241
7242 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
7243 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
7244
7245
7246 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
7247 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
7248 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
7249 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
7250
7251
7252 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
7253 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
7254
7255 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
7256 << MVPP22_AXI_CODE_CACHE_OFFS;
7257 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
7258 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7259 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
7260 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
7261
7262 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
7263 << MVPP22_AXI_CODE_CACHE_OFFS;
7264 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7265 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7266
7267 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
7268
7269 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
7270 << MVPP22_AXI_CODE_CACHE_OFFS;
7271 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7272 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7273
7274 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
7275 }
7276
7277
7278 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
7279 {
7280 const struct mbus_dram_target_info *dram_target_info;
7281 int err, i;
7282 u32 val;
7283
7284
7285 dram_target_info = mv_mbus_dram_info();
7286 if (dram_target_info)
7287 mvpp2_conf_mbus_windows(dram_target_info, priv);
7288
7289 if (priv->hw_version >= MVPP22)
7290 mvpp2_axi_init(priv);
7291
7292
7293 if (priv->hw_version == MVPP21) {
7294 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7295 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7296 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7297 } else {
7298 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7299 val &= ~MVPP22_SMI_POLLING_EN;
7300 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7301 }
7302
7303
7304 priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
7305 sizeof(*priv->aggr_txqs),
7306 GFP_KERNEL);
7307 if (!priv->aggr_txqs)
7308 return -ENOMEM;
7309
7310 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7311 priv->aggr_txqs[i].id = i;
7312 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
7313 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
7314 if (err < 0)
7315 return err;
7316 }
7317
7318
7319 if (priv->hw_version == MVPP21) {
7320 mvpp2_rx_fifo_init(priv);
7321 } else {
7322 mvpp22_rx_fifo_init(priv);
7323 mvpp22_tx_fifo_init(priv);
7324 if (priv->hw_version == MVPP23)
7325 mvpp23_rx_fifo_fc_set_tresh(priv);
7326 }
7327
7328 if (priv->hw_version == MVPP21)
7329 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7330 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
7331
7332
7333 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7334
7335
7336 err = mvpp2_bm_init(&pdev->dev, priv);
7337 if (err < 0)
7338 return err;
7339
7340
7341 err = mvpp2_prs_default_init(pdev, priv);
7342 if (err < 0)
7343 return err;
7344
7345
7346 mvpp2_cls_init(priv);
7347
7348 return 0;
7349 }
7350
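/* Map the optional CM3 SRAM region used for flow control; if it is absent only flow control is lost */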
7351 static int mvpp2_get_sram(struct platform_device *pdev,
7352 struct mvpp2 *priv)
7353 {
7354 struct resource *res;
7355
7356 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
7357 if (!res) {
7358 if (has_acpi_companion(&pdev->dev))
7359 dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
7360 else
7361 dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
7362 return 0;
7363 }
7364
7365 priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
7366
7367 return PTR_ERR_OR_ZERO(priv->cm3_base);
7368 }
7369
7370 static int mvpp2_probe(struct platform_device *pdev)
7371 {
7372 struct fwnode_handle *fwnode = pdev->dev.fwnode;
7373 struct fwnode_handle *port_fwnode;
7374 struct mvpp2 *priv;
7375 struct resource *res;
7376 void __iomem *base;
7377 int i, shared;
7378 int err;
7379
7380 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
7381 if (!priv)
7382 return -ENOMEM;
7383
7384 priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
7385
7386 /* Multi-queue mode is not supported on PPv2.1, so fall back to the
7387  * single-queue distribution mode.
7388  */
7389 if (priv->hw_version == MVPP21)
7390 queue_mode = MVPP2_QDIST_SINGLE_MODE;
7391
7392 base = devm_platform_ioremap_resource(pdev, 0);
7393 if (IS_ERR(base))
7394 return PTR_ERR(base);
7395
7396 if (priv->hw_version == MVPP21) {
7397 priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
7398 if (IS_ERR(priv->lms_base))
7399 return PTR_ERR(priv->lms_base);
7400 } else {
7401 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7402 if (!res) {
7403 dev_err(&pdev->dev, "Invalid resource\n");
7404 return -EINVAL;
7405 }
7406 if (has_acpi_companion(&pdev->dev)) {
7407 /* When booted via ACPI the second memory region may already be
7408  * marked as in use because it overlaps a region described in the
7409  * ACPI tables; release it here so it can be remapped below. The
7410  * driver serialises its own accesses to this region.
7411  */
7412
7413
7414
7415 release_resource(res);
7416 }
7417 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7418 if (IS_ERR(priv->iface_base))
7419 return PTR_ERR(priv->iface_base);
7420
7421
7422 err = mvpp2_get_sram(pdev, priv);
7423 if (err)
7424 dev_warn(&pdev->dev, "Failed to allocate CM3 SRAM\n");
7425
7426 /* Global flow control is only possible when the CM3 SRAM was mapped */
7427 if (priv->cm3_base)
7428 priv->global_tx_fc = true;
7429 }
7430
7431 if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
7432 priv->sysctrl_base =
7433 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
7434 "marvell,system-controller");
7435 if (IS_ERR(priv->sysctrl_base))
7436 /* The system controller regmap is optional for device tree
7437  * compatibility reasons; when it is absent, the GoP configuration
7438  * is assumed to have been done by the firmware/bootloader.
7439  */
7440
7441 priv->sysctrl_base = NULL;
7442 }
7443
7444 if (priv->hw_version >= MVPP22 &&
7445 mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
7446 priv->percpu_pools = 1;
7447
7448 mvpp2_setup_bm_pool();
7449
7450
7451 priv->nthreads = min_t(unsigned int, num_present_cpus(),
7452 MVPP2_MAX_THREADS);
7453
7454 shared = num_present_cpus() - priv->nthreads;
7455 if (shared > 0)
7456 bitmap_set(&priv->lock_map, 0,
7457 min_t(int, shared, MVPP2_MAX_THREADS));
7458
7459 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7460 u32 addr_space_sz;
7461
7462 addr_space_sz = (priv->hw_version == MVPP21 ?
7463 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
7464 priv->swth_base[i] = base + i * addr_space_sz;
7465 }
7466
7467 if (priv->hw_version == MVPP21)
7468 priv->max_port_rxqs = 8;
7469 else
7470 priv->max_port_rxqs = 32;
7471
7472 if (dev_of_node(&pdev->dev)) {
7473 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
7474 if (IS_ERR(priv->pp_clk))
7475 return PTR_ERR(priv->pp_clk);
7476 err = clk_prepare_enable(priv->pp_clk);
7477 if (err < 0)
7478 return err;
7479
7480 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
7481 if (IS_ERR(priv->gop_clk)) {
7482 err = PTR_ERR(priv->gop_clk);
7483 goto err_pp_clk;
7484 }
7485 err = clk_prepare_enable(priv->gop_clk);
7486 if (err < 0)
7487 goto err_pp_clk;
7488
7489 if (priv->hw_version >= MVPP22) {
7490 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
7491 if (IS_ERR(priv->mg_clk)) {
7492 err = PTR_ERR(priv->mg_clk);
7493 goto err_gop_clk;
7494 }
7495
7496 err = clk_prepare_enable(priv->mg_clk);
7497 if (err < 0)
7498 goto err_gop_clk;
7499
7500 priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk");
7501 if (IS_ERR(priv->mg_core_clk)) {
7502 err = PTR_ERR(priv->mg_core_clk);
7503 goto err_mg_clk;
7504 }
7505
7506 err = clk_prepare_enable(priv->mg_core_clk);
7507 if (err < 0)
7508 goto err_mg_clk;
7509 }
7510
7511 priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
7512 if (IS_ERR(priv->axi_clk)) {
7513 err = PTR_ERR(priv->axi_clk);
7514 goto err_mg_core_clk;
7515 }
7516
7517 err = clk_prepare_enable(priv->axi_clk);
7518 if (err < 0)
7519 goto err_mg_core_clk;
7520
7521
7522 priv->tclk = clk_get_rate(priv->pp_clk);
7523 } else {
7524 err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk);
7525 if (err) {
7526 dev_err(&pdev->dev, "missing clock-frequency value\n");
7527 return err;
7528 }
7529 }
7530
7531 if (priv->hw_version >= MVPP22) {
7532 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
7533 if (err)
7534 goto err_axi_clk;
7535
7536 /* All BM pools share a single register holding the high 32 bits of
7537  * their buffer addresses, so coherent DMA memory must stay below
7538  * 4GB; restrict the coherent mask accordingly.
7539  */
7540 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7541 if (err)
7542 goto err_axi_clk;
7543 }
7544
7545 /* Record which ports are described; needed before the FIFO setup in mvpp2_init() */
7546 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7547 if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
7548 priv->port_map |= BIT(i);
7549 }
7550 /* PPv2.3 shares the PPv2.2 match data, so detect it from the version register */
7551 if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
7552 priv->hw_version = MVPP23;
7553
7554
7555 spin_lock_init(&priv->mss_spinlock);
7556
7557
7558 err = mvpp2_init(pdev, priv);
7559 if (err < 0) {
7560 dev_err(&pdev->dev, "failed to initialize controller\n");
7561 goto err_axi_clk;
7562 }
7563
7564 err = mvpp22_tai_probe(&pdev->dev, priv);
7565 if (err < 0)
7566 goto err_axi_clk;
7567
7568
7569 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7570 err = mvpp2_port_probe(pdev, port_fwnode, priv);
7571 if (err < 0)
7572 goto err_port_probe;
7573 }
7574
7575 if (priv->port_count == 0) {
7576 dev_err(&pdev->dev, "no ports enabled\n");
7577 err = -ENODEV;
7578 goto err_axi_clk;
7579 }
7580
7581 /* Statistics must be gathered regularly because some of them (like
7582  * packet counters) are 32-bit registers and can overflow quickly.
7583  * A single workqueue, shared by all ports, refreshes them at regular
7584  * intervals.
7585  */
7586
7587 snprintf(priv->queue_name, sizeof(priv->queue_name),
7588 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
7589 priv->port_count > 1 ? "+" : "");
7590 priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
7591 if (!priv->stats_queue) {
7592 err = -ENOMEM;
7593 goto err_port_probe;
7594 }
7595
7596 if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
7597 err = mvpp2_enable_global_fc(priv);
7598 if (err)
7599 dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
7600 }
7601
7602 mvpp2_dbgfs_init(priv, pdev->name);
7603
7604 platform_set_drvdata(pdev, priv);
7605 return 0;
7606
7607 err_port_probe:
7608 fwnode_handle_put(port_fwnode);
7609
7610 i = 0;
7611 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7612 if (priv->port_list[i])
7613 mvpp2_port_remove(priv->port_list[i]);
7614 i++;
7615 }
7616 err_axi_clk:
7617 clk_disable_unprepare(priv->axi_clk);
7618 err_mg_core_clk:
7619 clk_disable_unprepare(priv->mg_core_clk);
7620 err_mg_clk:
7621 clk_disable_unprepare(priv->mg_clk);
7622 err_gop_clk:
7623 clk_disable_unprepare(priv->gop_clk);
7624 err_pp_clk:
7625 clk_disable_unprepare(priv->pp_clk);
7626 return err;
7627 }
7628
7629 static int mvpp2_remove(struct platform_device *pdev)
7630 {
7631 struct mvpp2 *priv = platform_get_drvdata(pdev);
7632 struct fwnode_handle *fwnode = pdev->dev.fwnode;
7633 int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
7634 struct fwnode_handle *port_fwnode;
7635
7636 mvpp2_dbgfs_cleanup(priv);
7637
7638 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7639 if (priv->port_list[i]) {
7640 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
7641 mvpp2_port_remove(priv->port_list[i]);
7642 }
7643 i++;
7644 }
7645
7646 destroy_workqueue(priv->stats_queue);
7647
7648 if (priv->percpu_pools)
7649 poolnum = mvpp2_get_nrxqs(priv) * 2;
7650
7651 for (i = 0; i < poolnum; i++) {
7652 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
7653
7654 mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
7655 }
7656
7657 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7658 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
7659
7660 dma_free_coherent(&pdev->dev,
7661 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
7662 aggr_txq->descs,
7663 aggr_txq->descs_dma);
7664 }
7665
7666 if (is_acpi_node(port_fwnode))
7667 return 0;
7668
7669 clk_disable_unprepare(priv->axi_clk);
7670 clk_disable_unprepare(priv->mg_core_clk);
7671 clk_disable_unprepare(priv->mg_clk);
7672 clk_disable_unprepare(priv->pp_clk);
7673 clk_disable_unprepare(priv->gop_clk);
7674
7675 return 0;
7676 }
7677
7678 static const struct of_device_id mvpp2_match[] = {
7679 {
7680 .compatible = "marvell,armada-375-pp2",
7681 .data = (void *)MVPP21,
7682 },
7683 {
7684 .compatible = "marvell,armada-7k-pp22",
7685 .data = (void *)MVPP22,
7686 },
7687 { }
7688 };
7689 MODULE_DEVICE_TABLE(of, mvpp2_match);
7690
7691 #ifdef CONFIG_ACPI
7692 static const struct acpi_device_id mvpp2_acpi_match[] = {
7693 { "MRVL0110", MVPP22 },
7694 { },
7695 };
7696 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
7697 #endif
7698
7699 static struct platform_driver mvpp2_driver = {
7700 .probe = mvpp2_probe,
7701 .remove = mvpp2_remove,
7702 .driver = {
7703 .name = MVPP2_DRIVER_NAME,
7704 .of_match_table = mvpp2_match,
7705 .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
7706 },
7707 };
7708
7709 module_platform_driver(mvpp2_driver);
7710
7711 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
7712 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
7713 MODULE_LICENSE("GPL v2");