#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif

#include <linux/atomic.h>
#include <net/sch_generic.h>

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

/* skb->cb holds the PKO gather list (an array of u64 buffer pointers). */
#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))

/*
 * GET_SKBUFF_QOS() can be defined to override how the transmit path
 * chooses an output queue for an skbuff. The default implementation
 * below always selects queue 0, the base queue for the port. To use,
 * for example, the skb->priority field, define it as
 * ((skb)->priority).
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET_OLD(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup);

/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)

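/*
 * cvm_oct_adjust_skb_to_free - clamp a value read from the skb-free FAU.
 * @skb_to_free: value returned when MAX_SKB_TO_FREE was atomically added
 *               to the FAU counter.
 * @fau: the FAU address the value was read from.
 *
 * The caller speculatively claimed up to MAX_SKB_TO_FREE completed skbs
 * by adding to the FAU counter. Give back the part of the claim that is
 * not covered by skbs actually pending, and return how many skbs may be
 * freed now (0..MAX_SKB_TO_FREE).
 */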
static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
{
	int undo;

	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
						   MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
						       -skb_to_free;
	return skb_to_free;
}

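/*
 * Arm CIU timer 1 as a one-shot TX cleanup watchdog so queued skbs
 * still get freed even when no further transmits arrive to trigger
 * the cleanup path.
 */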
static void cvm_oct_kick_tx_poll_watchdog(void)
{
	union cvmx_ciu_timx ciu_timx;

	ciu_timx.u64 = 0;
	ciu_timx.s.one_shot = 1;
	ciu_timx.s.len = cvm_oct_tx_poll_interval;
	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}

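/*
 * cvm_oct_free_tx_skbs - free skbs whose transmission the PKO hardware
 * has completed, and wake the queue if there is room again.
 */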
static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
	int skb_to_free;
	int qos, queues_per_port;
	int total_freed = 0;
	int total_remaining = 0;
	unsigned long flags;
	struct octeon_ethernet *priv = netdev_priv(dev);

	queues_per_port = cvmx_pko_get_num_queues(priv->port);

	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau + qos * 4);
		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			struct sk_buff *to_free_list = NULL;

			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
			while (skb_to_free > 0) {
				struct sk_buff *t;

				t = __skb_dequeue(&priv->tx_free_list[qos]);
				t->next = to_free_list;
				to_free_list = t;
				skb_to_free--;
			}
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);

			/* Do the actual freeing outside of the lock. */
			while (to_free_list) {
				struct sk_buff *t = to_free_list;

				to_free_list = to_free_list->next;
				dev_kfree_skb_any(t);
			}
		}
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	}
	if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	if (total_remaining)
		cvm_oct_kick_tx_poll_watchdog();
}

/**
 * cvm_oct_xmit - transmit a packet
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Return: always NETDEV_TX_OK.
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	union cvmx_pko_command_word0 pko_command;
	union cvmx_buf_ptr hw_buffer;
	u64 old_scratch;
	u64 old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int skb_to_free;
	int buffers_to_free;
	u32 total_to_clean;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

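	/*
	 * Prefetch the private data structure; it is larger than one
	 * cache line.
	 */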
	prefetch(priv);

	/*
	 * The checks on CVMX_PKO_QUEUES_PER_PORT_* let the compiler
	 * drop the "qos" computation entirely when neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else {
		qos = 0;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Asynchronously fetch the number of packet buffers to
		 * free and claim up to MAX_SKB_TO_FREE skbs to free.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}

	/*
	 * We have space for 6 segment pointers; if there will be more
	 * than that, we must linearize.
	 */
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
			if (USE_ASYNC_IOBDMA) {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware
				 */
				CVMX_SYNCIOBDMA;
				skb_to_free =
					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware
				 */
				skb_to_free =
					cvmx_fau_fetch_and_add32(priv->fau +
								 qos * 4,
								 MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
								 priv->fau +
								 qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
			goto skip_xmit;
		}
	}

	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets whenever we are in half duplex mode. We don't
	 * handle the case of having a small packet but no room to
	 * add the padding; the kernel should always give us at least
	 * a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
				cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index,
								 interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;

				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					__skb_put_zero(skb, add_bytes);
			}
		}
	}

	/* Build the PKO command */
	pko_command.u64 = 0;
#ifdef __LITTLE_ENDIAN
	pko_command.s.le = 1;
#endif
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
		/* Build a gather list in skb->cb: head, then each frag. */
		hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *fs = skb_shinfo(skb)->frags + i;

			hw_buffer.s.addr =
				XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
			hw_buffer.s.size = skb_frag_size(fs);
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr =
			XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bugs will be created as the network stack
	 * will try to use that saved info. Also, the hardware might
	 * corrupt memory if a buffer gets reused. These problems
	 * should be very rare.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/* TX buffer beginning can't meet FPA alignment constraints */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/* TX buffer isn't large enough for the FPA */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/* TX buffer sharing data with someone else */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/* TX buffer has been cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/* TX buffer header has been cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/* TX buffer has a destructor */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/* TX buffer has fragments */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->truesize !=
		     sizeof(*skb) + skb_end_offset(skb))) {
		/* TX buffer truesize has been changed */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA. We don't need the FAU
	 * update anymore.
	 */
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);

	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	/*
	 * The skbuff will be reused without ever being freed. We must
	 * clean up a bunch of core things.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
	skb_ext_reset(skb);
	nf_reset_ct(skb);
	skb_reset_redirect(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

	/* Check if we can use the hardware checksumming */
	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) &&
	    (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) ||
	     (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
			cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
						 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
						 priv->fau + qos * 4);

	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;

	if (pko_command.s.dontfree) {
		queue_type = QUEUE_CORE;
		pko_command.s.reg0 = priv->fau + qos * 4;
	} else {
		queue_type = QUEUE_HW;
	}
	if (USE_ASYNC_IOBDMA)
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       FAU_TOTAL_TX_TO_CLEAN, 1);

	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
		     MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core.  */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			netif_stop_queue(dev);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
		} else {
			/* If not using normal queueing.  */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		printk_ratelimited("%s: Failed to send the packet\n",
				   dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;

	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		dev->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}
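
	/*
	 * Pull the skbs the hardware has finished with off the free
	 * list while we still hold the lock; they are freed below,
	 * after the lock is dropped.
	 */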
	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

	/* Do the actual freeing outside of the lock. */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;

		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}

	if (USE_ASYNC_IOBDMA) {
		CVMX_SYNCIOBDMA;
		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	} else {
		total_to_clean =
			cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
	}

	if (total_to_clean & 0x3ff) {
		/*
		 * Kick the cleanup tasklet so a burst of traffic on
		 * one port cannot indefinitely delay freeing skbs
		 * queued on another port that is blocked waiting for
		 * the cleanup.
		 */
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	}

	cvm_oct_kick_tx_poll_watchdog();

	return NETDEV_TX_OK;
}

/**
 * cvm_oct_xmit_pow - transmit a packet in POW mode.
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Return: always zero.
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

	if (unlikely(!work)) {
		printk_ratelimited("%s: Failed to allocate a work queue entry\n",
				   dev->name);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(!packet_buffer)) {
		printk_ratelimited("%s: Failed to allocate a packet buffer\n",
				   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}
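
	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64-bit word. The below
	 * calculation may add a little extra, but that doesn't hurt.
	 */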
	copy_location = packet_buffer + sizeof(u64);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);

	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
		work->word0.pip.cn38xx.hw_chksum = skb->csum;
	work->word1.len = skb->len;
	cvmx_wqe_set_port(work, priv->port);
	cvmx_wqe_set_qos(work, priv->port & 0x7);
	cvmx_wqe_set_grp(work, pow_send_group);
	work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->word1.tag = pow_send_group;	/* FIXME */

	/* Default to zero. Sets of zero later are commented out */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

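	/*
	 * Fill in the parse results the hardware would normally
	 * produce on receive, since this packet bypasses the input
	 * parser.
	 */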
	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
			(ip_hdr(skb)->protocol == IPPROTO_TCP) ||
			(ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
		/* FIXME */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
					  (ip_hdr(skb)->frag_off ==
					   cpu_to_be16(1 << 14)));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No packet errors */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
			(skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
			(skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No packet errors */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
			     cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);
	return 0;
}

/**
 * cvm_oct_tx_shutdown_dev - free all skbs queued for TX on a device.
 * @dev:    Device being shutdown
 */
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}

static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
	int port;

	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];

			cvm_oct_free_tx_skbs(dev);
		}
	}
}

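/*
 * CIU timer 1 fired: the TX cleanup watchdog. Defer the actual skb
 * cleanup to the tasklet.
 */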
static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
	/* Disable the interrupt.  */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Do the work in the tasklet.  */
	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	return IRQ_HANDLED;
}

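/* Hook up the TX cleanup watchdog interrupt on CIU timer 1. */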
void cvm_oct_tx_initialize(void)
{
	int i;

	/* Disable the interrupt.  */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
	i = request_irq(OCTEON_IRQ_TIMER1,
			cvm_oct_tx_cleanup_watchdog, 0,
			"Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}

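/* Release the watchdog IRQ on module shutdown. */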
void cvm_oct_tx_shutdown(void)
{
	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}