// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-util.h"

static atomic_t oct_rx_ready = ATOMIC_INIT(0);

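/*
 * Per-group receive state: the IRQ assigned to the group, the group
 * number itself, and the NAPI instance that drains it.  The POW/SSO
 * hardware provides 16 work queue groups.
 */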
static struct oct_rx_group {
	int irq;
	int group;
	struct napi_struct napi;
} oct_rx_group[16];

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(irq);
	napi_schedule(napi_id);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns: Non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS.  Note these packets still get
		 * counted as frame errors.
		 */
		return 0;

	if (work->word2.snoip.err_code == 5 ||
	    work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
				return 0;
			}

			if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
				return 0;
			}

			printk_ratelimited("Port %d unknown preamble, packet dropped\n",
					   port);
			cvm_oct_free_work(work);
			return 1;
		}
	}

	printk_ratelimited("Port %d receive error code %d, packet dropped\n",
			   port, work->word2.snoip.err_code);
	cvm_oct_free_work(work);
	return 1;
}

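/*
 * copy_segments_to_skb - copy a packet that spans multiple hardware
 * buffers into a freshly allocated skb, walking the buffer chain via
 * the next-pointer stored just before each segment's data.
 */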
static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
{
	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
	int len = work->word1.len;
	int segment_size;

	while (segments--) {
		union cvmx_buf_ptr next_ptr;

		next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		/*
		 * Octeon Errata PKI-100: The segment size is wrong.
		 *
		 * Until it is fixed, calculate the segment size based
		 * on the packet pool buffer size.
		 * When it is fixed, the following line should be
		 * replaced with this one:
		 * int segment_size = segment_ptr.s.size;
		 */
		segment_size =
			CVMX_FPA_PACKET_POOL_SIZE -
			(segment_ptr.s.addr -
			 (((segment_ptr.s.addr >> 7) -
			   segment_ptr.s.back) << 7));

		/* Don't copy more than what is left in the packet */
		if (segment_size > len)
			segment_size = len;

		/* Copy the data into the packet */
		skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
			     segment_size);
		len -= segment_size;
		segment_ptr = next_ptr;
	}
}

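/*
 * cvm_oct_poll - process up to @budget packets for one receive group.
 * Temporarily restricts this core's POW/SSO group mask to the group
 * being polled, pulls work queue entries, turns them into skbs and
 * hands them to the network stack.  Returns the number of packets
 * processed.
 */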
static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
	const int coreid = cvmx_get_core_num();
	u64 old_group_mask;
	u64 old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       BIT(rx_group->group));
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       BIT(rx_group->group));
	}

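	/*
	 * Issue the first work request asynchronously; the response is
	 * fetched from scratchpad memory at the top of the loop, so the
	 * POW access overlaps with the rest of the setup.
	 */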
	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		struct cvmx_wqe *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (!work) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       BIT(rx_group->group));
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       BIT(rx_group->group));
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = BIT(rx_group->group);
				wq_int.s.wq_int = BIT(rx_group->group);
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}
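		/*
		 * When the FPA packet buffer was carved out of an skb
		 * (see cvm_oct_fill_hw_skbuff() in ethernet-mem.c), the
		 * skb pointer is stashed immediately before the packet
		 * data; recover it here for the zero-copy path.
		 */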
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}

		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				u8 *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				skb_put_data(skb, ptr, work->word1.len);
				/* No packet buffers to free */
			} else {
				copy_segments_to_skb(work, skb);
			}
			packet_not_copied = 0;
		}
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += skb->len;
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device
				 * that isn't up.
				 */
				dev->stats.rx_dropped++;
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}

		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}

	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
	cvm_oct_rx_refill_pool(0);

	return rx_count;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns: the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
						     napi);
	int rx_count;

	rx_count = cvm_oct_poll(rx_group, budget);

	if (rx_count < budget) {
		/* No more work */
		napi_complete_done(napi, rx_count);
		enable_irq(rx_group->irq);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets (netpoll)
 * @dev: Device to poll. Unused.
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	int i;

	if (!atomic_read(&oct_rx_ready))
		return;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		cvm_oct_poll(&oct_rx_group[i], 16);
	}
}
#endif

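/*
 * cvm_oct_rx_initialize - set up receive processing.  Registers a NAPI
 * instance and an IRQ handler for every POW group in use, programs the
 * work queue interrupt thresholds, and kicks NAPI once so any packets
 * already queued are drained.
 */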
void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add_weight(dev_for_napi, &oct_rx_group[i].napi,
				      cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);

		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

		disable_irq_nosync(oct_rx_group[i].irq);

		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}

		/*
		 * Schedule NAPI now.  This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}
	atomic_inc(&oct_rx_ready);
}

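/*
 * cvm_oct_rx_shutdown - undo cvm_oct_rx_initialize: mask the work queue
 * interrupts, release the IRQs and delete the NAPI instances.
 */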
void cvm_oct_rx_shutdown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Disable POW interrupt */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
		else
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

		/* Free the IRQ; dev_id must match what request_irq() used */
		free_irq(oct_rx_group[i].irq, &oct_rx_group[i].napi);

		netif_napi_del(&oct_rx_group[i].napi);
	}
}