// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation
 *
 * Authors: Santiago Leon <santil@linux.vnet.ibm.com>
 */
0014 #include <linux/module.h>
0015 #include <linux/types.h>
0016 #include <linux/errno.h>
0017 #include <linux/dma-mapping.h>
0018 #include <linux/kernel.h>
0019 #include <linux/netdevice.h>
0020 #include <linux/etherdevice.h>
0021 #include <linux/skbuff.h>
0022 #include <linux/init.h>
0023 #include <linux/interrupt.h>
0024 #include <linux/mm.h>
0025 #include <linux/pm.h>
0026 #include <linux/ethtool.h>
0027 #include <linux/in.h>
0028 #include <linux/ip.h>
0029 #include <linux/ipv6.h>
0030 #include <linux/slab.h>
0031 #include <asm/hvcall.h>
0032 #include <linux/atomic.h>
0033 #include <asm/vio.h>
0034 #include <asm/iommu.h>
0035 #include <asm/firmware.h>
0036 #include <net/tcp.h>
0037 #include <net/ip6_checksum.h>
0038
0039 #include "ibmveth.h"
0040
0041 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
0042 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
0043 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
0044
0045 static struct kobj_type ktype_veth_pool;
0046
0047
0048 static const char ibmveth_driver_name[] = "ibmveth";
0049 static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
0050 #define ibmveth_driver_version "1.06"
0051
0052 MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
0053 MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
0054 MODULE_LICENSE("GPL");
0055 MODULE_VERSION(ibmveth_driver_version);
0056
0057 static unsigned int tx_copybreak __read_mostly = 128;
0058 module_param(tx_copybreak, uint, 0644);
0059 MODULE_PARM_DESC(tx_copybreak,
0060 "Maximum size of packet that is copied to a new buffer on transmit");
0061
0062 static unsigned int rx_copybreak __read_mostly = 128;
0063 module_param(rx_copybreak, uint, 0644);
0064 MODULE_PARM_DESC(rx_copybreak,
0065 "Maximum size of packet that is copied to a new buffer on receive");
0066
0067 static unsigned int rx_flush __read_mostly = 0;
0068 module_param(rx_flush, uint, 0644);
0069 MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
0070
0071 static bool old_large_send __read_mostly;
0072 module_param(old_large_send, bool, 0444);
0073 MODULE_PARM_DESC(old_large_send,
0074 "Use old large send method on firmware that supports the new method");
0075
0076 struct ibmveth_stat {
0077 char name[ETH_GSTRING_LEN];
0078 int offset;
0079 };
0080
0081 #define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
0082 #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
0083
0084 static struct ibmveth_stat ibmveth_stats[] = {
0085 { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
0086 { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
0087 { "replenish_add_buff_failure",
0088 IBMVETH_STAT_OFF(replenish_add_buff_failure) },
0089 { "replenish_add_buff_success",
0090 IBMVETH_STAT_OFF(replenish_add_buff_success) },
0091 { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
0092 { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
0093 { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
0094 { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
0095 { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
0096 { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
0097 { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
0098 { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
0099 { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
0100 };
0101
0102
0103 static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
0104 {
0105 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
0106 }
0107
0108 static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
0109 {
0110 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
0111 IBMVETH_RXQ_TOGGLE_SHIFT;
0112 }
0113
0114 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
0115 {
0116 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
0117 }
0118
0119 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
0120 {
0121 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
0122 }
0123
0124 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
0125 {
0126 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
0127 }
0128
0129 static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
0130 {
0131 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
0132 }
0133
0134 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
0135 {
0136 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
0137 }
0138
0139 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
0140 {
0141 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
0142 }
0143
0144
0145 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
0146 u32 pool_index, u32 pool_size,
0147 u32 buff_size, u32 pool_active)
0148 {
0149 pool->size = pool_size;
0150 pool->index = pool_index;
0151 pool->buff_size = buff_size;
0152 pool->threshold = pool_size * 7 / 8;
0153 pool->active = pool_active;
0154 }
0155
0156
0157 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
0158 {
0159 int i;
0160
0161 pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);
0162
0163 if (!pool->free_map)
0164 return -1;
0165
0166 pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
0167 if (!pool->dma_addr) {
0168 kfree(pool->free_map);
0169 pool->free_map = NULL;
0170 return -1;
0171 }
0172
0173 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
0174
0175 if (!pool->skbuff) {
0176 kfree(pool->dma_addr);
0177 pool->dma_addr = NULL;
0178
0179 kfree(pool->free_map);
0180 pool->free_map = NULL;
0181 return -1;
0182 }
0183
0184 for (i = 0; i < pool->size; ++i)
0185 pool->free_map[i] = i;
0186
0187 atomic_set(&pool->available, 0);
0188 pool->producer_index = 0;
0189 pool->consumer_index = 0;
0190
0191 return 0;
0192 }
0193
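/*
 * Flush the CPU data cache over a receive buffer (one dcbfl per cache
 * line) so the hypervisor sees its current contents.  Only used when the
 * rx_flush module parameter is set.
 */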
0194 static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
0195 {
0196 unsigned long offset;
0197
0198 for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
0199 asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
0200 }
0201
0202
0203
0204
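/*
 * Refill a receive buffer pool: allocate skbs, DMA-map them and hand them
 * to the firmware with h_add_logical_lan_buffer until the pool is back at
 * full size or an allocation/mapping/hcall failure stops us.
 */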
0205 static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
0206 struct ibmveth_buff_pool *pool)
0207 {
0208 u32 i;
0209 u32 count = pool->size - atomic_read(&pool->available);
0210 u32 buffers_added = 0;
0211 struct sk_buff *skb;
0212 unsigned int free_index, index;
0213 u64 correlator;
0214 unsigned long lpar_rc;
0215 dma_addr_t dma_addr;
0216
0217 mb();
0218
0219 for (i = 0; i < count; ++i) {
0220 union ibmveth_buf_desc desc;
0221
0222 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
0223
0224 if (!skb) {
0225 netdev_dbg(adapter->netdev,
0226 "replenish: unable to allocate skb\n");
0227 adapter->replenish_no_mem++;
0228 break;
0229 }
0230
0231 free_index = pool->consumer_index;
0232 pool->consumer_index++;
0233 if (pool->consumer_index >= pool->size)
0234 pool->consumer_index = 0;
0235 index = pool->free_map[free_index];
0236
0237 BUG_ON(index == IBM_VETH_INVALID_MAP);
0238 BUG_ON(pool->skbuff[index] != NULL);
0239
0240 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
0241 pool->buff_size, DMA_FROM_DEVICE);
0242
0243 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
0244 goto failure;
0245
0246 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
0247 pool->dma_addr[index] = dma_addr;
0248 pool->skbuff[index] = skb;
0249
0250 correlator = ((u64)pool->index << 32) | index;
0251 *(u64 *)skb->data = correlator;
0252
0253 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
0254 desc.fields.address = dma_addr;
0255
0256 if (rx_flush) {
0257 unsigned int len = min(pool->buff_size,
0258 adapter->netdev->mtu +
0259 IBMVETH_BUFF_OH);
0260 ibmveth_flush_buffer(skb->data, len);
0261 }
0262 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
0263 desc.desc);
0264
0265 if (lpar_rc != H_SUCCESS) {
0266 goto failure;
0267 } else {
0268 buffers_added++;
0269 adapter->replenish_add_buff_success++;
0270 }
0271 }
0272
0273 mb();
0274 atomic_add(buffers_added, &(pool->available));
0275 return;
0276
0277 failure:
0278 pool->free_map[free_index] = index;
0279 pool->skbuff[index] = NULL;
0280 if (pool->consumer_index == 0)
0281 pool->consumer_index = pool->size - 1;
0282 else
0283 pool->consumer_index--;
0284 if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
0285 dma_unmap_single(&adapter->vdev->dev,
0286 pool->dma_addr[index], pool->buff_size,
0287 DMA_FROM_DEVICE);
0288 dev_kfree_skb_any(skb);
0289 adapter->replenish_add_buff_failure++;
0290
0291 mb();
0292 atomic_add(buffers_added, &(pool->available));
0293 }
0294
0295
0296
0297
0298
0299
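/*
 * The last 8 bytes of the buffer list page hold the firmware's running
 * count of frames dropped because no receive buffer was available.
 */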
0300 static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
0301 {
0302 __be64 *p = adapter->buffer_list_addr + 4096 - 8;
0303
0304 adapter->rx_no_buffer = be64_to_cpup(p);
0305 }
0306
0307
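/* Top up every active pool that has dropped below its threshold. */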
0308 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
0309 {
0310 int i;
0311
0312 adapter->replenish_task_cycles++;
0313
0314 for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
0315 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
0316
0317 if (pool->active &&
0318 (atomic_read(&pool->available) < pool->threshold))
0319 ibmveth_replenish_buffer_pool(adapter, pool);
0320 }
0321
0322 ibmveth_update_rx_no_buffer(adapter);
0323 }
0324
0325
0326 static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
0327 struct ibmveth_buff_pool *pool)
0328 {
0329 int i;
0330
0331 kfree(pool->free_map);
0332 pool->free_map = NULL;
0333
0334 if (pool->skbuff && pool->dma_addr) {
0335 for (i = 0; i < pool->size; ++i) {
0336 struct sk_buff *skb = pool->skbuff[i];
0337 if (skb) {
0338 dma_unmap_single(&adapter->vdev->dev,
0339 pool->dma_addr[i],
0340 pool->buff_size,
0341 DMA_FROM_DEVICE);
0342 dev_kfree_skb_any(skb);
0343 pool->skbuff[i] = NULL;
0344 }
0345 }
0346 }
0347
0348 if (pool->dma_addr) {
0349 kfree(pool->dma_addr);
0350 pool->dma_addr = NULL;
0351 }
0352
0353 if (pool->skbuff) {
0354 kfree(pool->skbuff);
0355 pool->skbuff = NULL;
0356 }
0357 }
0358
0359
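/*
 * Unmap and release the buffer named by a correlator (pool index in the
 * upper 32 bits, buffer index in the lower 32) and return its slot to the
 * pool's free map.  The skb itself now belongs to the caller.
 */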
0360 static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
0361 u64 correlator)
0362 {
0363 unsigned int pool = correlator >> 32;
0364 unsigned int index = correlator & 0xffffffffUL;
0365 unsigned int free_index;
0366 struct sk_buff *skb;
0367
0368 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
0369 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
0370
0371 skb = adapter->rx_buff_pool[pool].skbuff[index];
0372
0373 BUG_ON(skb == NULL);
0374
0375 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
0376
0377 dma_unmap_single(&adapter->vdev->dev,
0378 adapter->rx_buff_pool[pool].dma_addr[index],
0379 adapter->rx_buff_pool[pool].buff_size,
0380 DMA_FROM_DEVICE);
0381
0382 free_index = adapter->rx_buff_pool[pool].producer_index;
0383 adapter->rx_buff_pool[pool].producer_index++;
0384 if (adapter->rx_buff_pool[pool].producer_index >=
0385 adapter->rx_buff_pool[pool].size)
0386 adapter->rx_buff_pool[pool].producer_index = 0;
0387 adapter->rx_buff_pool[pool].free_map[free_index] = index;
0388
0389 mb();
0390
0391 atomic_dec(&(adapter->rx_buff_pool[pool].available));
0392 }
0393
0394
0395 static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
0396 {
0397 u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
0398 unsigned int pool = correlator >> 32;
0399 unsigned int index = correlator & 0xffffffffUL;
0400
0401 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
0402 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
0403
0404 return adapter->rx_buff_pool[pool].skbuff[index];
0405 }
0406
0407
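/*
 * Hand the buffer at the current rx queue slot back to the firmware.
 * Returns 0 if the buffer could not be re-added and was removed from its
 * pool (the caller must then free the skb), 1 otherwise.
 */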
0408 static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
0409 {
0410 u32 q_index = adapter->rx_queue.index;
0411 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
0412 unsigned int pool = correlator >> 32;
0413 unsigned int index = correlator & 0xffffffffUL;
0414 union ibmveth_buf_desc desc;
0415 unsigned long lpar_rc;
0416 int ret = 1;
0417
0418 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
0419 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
0420
0421 if (!adapter->rx_buff_pool[pool].active) {
0422 ibmveth_rxq_harvest_buffer(adapter);
0423 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
0424 goto out;
0425 }
0426
0427 desc.fields.flags_len = IBMVETH_BUF_VALID |
0428 adapter->rx_buff_pool[pool].buff_size;
0429 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
0430
0431 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
0432
0433 if (lpar_rc != H_SUCCESS) {
0434 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
0435 "during recycle rc=%ld", lpar_rc);
0436 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
0437 ret = 0;
0438 }
0439
0440 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
0441 adapter->rx_queue.index = 0;
0442 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
0443 }
0444
0445 out:
0446 return ret;
0447 }
0448
0449 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
0450 {
0451 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
0452
0453 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
0454 adapter->rx_queue.index = 0;
0455 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
0456 }
0457 }
0458
0459 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
0460 union ibmveth_buf_desc rxq_desc, u64 mac_address)
0461 {
0462 int rc, try_again = 1;
0463
0464
0465
0466
0467
0468
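/* If the first registration attempt fails (for example because the
 * logical LAN is still registered from earlier use), free it and retry
 * exactly once.
 */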
0469 retry:
0470 rc = h_register_logical_lan(adapter->vdev->unit_address,
0471 adapter->buffer_list_dma, rxq_desc.desc,
0472 adapter->filter_list_dma, mac_address);
0473
0474 if (rc != H_SUCCESS && try_again) {
0475 do {
0476 rc = h_free_logical_lan(adapter->vdev->unit_address);
0477 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
0478
0479 try_again = 0;
0480 goto retry;
0481 }
0482
0483 return rc;
0484 }
0485
0486 static int ibmveth_open(struct net_device *netdev)
0487 {
0488 struct ibmveth_adapter *adapter = netdev_priv(netdev);
0489 u64 mac_address;
0490 int rxq_entries = 1;
0491 unsigned long lpar_rc;
0492 int rc;
0493 union ibmveth_buf_desc rxq_desc;
0494 int i;
0495 struct device *dev;
0496
0497 netdev_dbg(netdev, "open starting\n");
0498
0499 napi_enable(&adapter->napi);
0500
0501 for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
0502 rxq_entries += adapter->rx_buff_pool[i].size;
0503
0504 rc = -ENOMEM;
0505 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
0506 if (!adapter->buffer_list_addr) {
0507 netdev_err(netdev, "unable to allocate list pages\n");
0508 goto out;
0509 }
0510
0511 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
0512 if (!adapter->filter_list_addr) {
0513 netdev_err(netdev, "unable to allocate filter pages\n");
0514 goto out_free_buffer_list;
0515 }
0516
0517 dev = &adapter->vdev->dev;
0518
0519 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
0520 rxq_entries;
0521 adapter->rx_queue.queue_addr =
0522 dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
0523 &adapter->rx_queue.queue_dma, GFP_KERNEL);
0524 if (!adapter->rx_queue.queue_addr)
0525 goto out_free_filter_list;
0526
0527 adapter->buffer_list_dma = dma_map_single(dev,
0528 adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
0529 if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
0530 netdev_err(netdev, "unable to map buffer list pages\n");
0531 goto out_free_queue_mem;
0532 }
0533
0534 adapter->filter_list_dma = dma_map_single(dev,
0535 adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
0536 if (dma_mapping_error(dev, adapter->filter_list_dma)) {
0537 netdev_err(netdev, "unable to map filter list pages\n");
0538 goto out_unmap_buffer_list;
0539 }
0540
0541 adapter->rx_queue.index = 0;
0542 adapter->rx_queue.num_slots = rxq_entries;
0543 adapter->rx_queue.toggle = 1;
0544
0545 mac_address = ether_addr_to_u64(netdev->dev_addr);
0546
0547 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
0548 adapter->rx_queue.queue_len;
0549 rxq_desc.fields.address = adapter->rx_queue.queue_dma;
0550
0551 netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
0552 netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
0553 netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
0554
0555 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
0556
0557 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
0558
0559 if (lpar_rc != H_SUCCESS) {
0560 netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
0561 lpar_rc);
0562 netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
0563 "desc:0x%llx MAC:0x%llx\n",
0564 adapter->buffer_list_dma,
0565 adapter->filter_list_dma,
0566 rxq_desc.desc,
0567 mac_address);
0568 rc = -ENONET;
0569 goto out_unmap_filter_list;
0570 }
0571
0572 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
0573 if (!adapter->rx_buff_pool[i].active)
0574 continue;
0575 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
0576 netdev_err(netdev, "unable to alloc pool\n");
0577 adapter->rx_buff_pool[i].active = 0;
0578 rc = -ENOMEM;
0579 goto out_free_buffer_pools;
0580 }
0581 }
0582
0583 netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
0584 rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
0585 netdev);
0586 if (rc != 0) {
0587 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
0588 netdev->irq, rc);
0589 do {
0590 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
0591 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
0592
0593 goto out_free_buffer_pools;
0594 }
0595
0596 rc = -ENOMEM;
0597
0598 adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
0599 netdev->mtu + IBMVETH_BUFF_OH,
0600 &adapter->bounce_buffer_dma, GFP_KERNEL);
0601 if (!adapter->bounce_buffer) {
0602 netdev_err(netdev, "unable to alloc bounce buffer\n");
0603 goto out_free_irq;
0604 }
0605
0606 netdev_dbg(netdev, "initial replenish cycle\n");
0607 ibmveth_interrupt(netdev->irq, netdev);
0608
0609 netif_start_queue(netdev);
0610
0611 netdev_dbg(netdev, "open complete\n");
0612
0613 return 0;
0614
0615 out_free_irq:
0616 free_irq(netdev->irq, netdev);
0617 out_free_buffer_pools:
0618 while (--i >= 0) {
0619 if (adapter->rx_buff_pool[i].active)
0620 ibmveth_free_buffer_pool(adapter,
0621 &adapter->rx_buff_pool[i]);
0622 }
0623 out_unmap_filter_list:
0624 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
0625 DMA_BIDIRECTIONAL);
0626 out_unmap_buffer_list:
0627 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
0628 DMA_BIDIRECTIONAL);
0629 out_free_queue_mem:
0630 dma_free_coherent(dev, adapter->rx_queue.queue_len,
0631 adapter->rx_queue.queue_addr,
0632 adapter->rx_queue.queue_dma);
0633 out_free_filter_list:
0634 free_page((unsigned long)adapter->filter_list_addr);
0635 out_free_buffer_list:
0636 free_page((unsigned long)adapter->buffer_list_addr);
0637 out:
0638 napi_disable(&adapter->napi);
0639 return rc;
0640 }
0641
0642 static int ibmveth_close(struct net_device *netdev)
0643 {
0644 struct ibmveth_adapter *adapter = netdev_priv(netdev);
0645 struct device *dev = &adapter->vdev->dev;
0646 long lpar_rc;
0647 int i;
0648
0649 netdev_dbg(netdev, "close starting\n");
0650
0651 napi_disable(&adapter->napi);
0652
0653 if (!adapter->pool_config)
0654 netif_stop_queue(netdev);
0655
0656 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
0657
0658 do {
0659 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
0660 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
0661
0662 if (lpar_rc != H_SUCCESS) {
0663 netdev_err(netdev, "h_free_logical_lan failed with %lx, "
0664 "continuing with close\n", lpar_rc);
0665 }
0666
0667 free_irq(netdev->irq, netdev);
0668
0669 ibmveth_update_rx_no_buffer(adapter);
0670
0671 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
0672 DMA_BIDIRECTIONAL);
0673 free_page((unsigned long)adapter->buffer_list_addr);
0674
0675 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
0676 DMA_BIDIRECTIONAL);
0677 free_page((unsigned long)adapter->filter_list_addr);
0678
0679 dma_free_coherent(dev, adapter->rx_queue.queue_len,
0680 adapter->rx_queue.queue_addr,
0681 adapter->rx_queue.queue_dma);
0682
0683 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
0684 if (adapter->rx_buff_pool[i].active)
0685 ibmveth_free_buffer_pool(adapter,
0686 &adapter->rx_buff_pool[i]);
0687
0688 dma_free_coherent(&adapter->vdev->dev,
0689 adapter->netdev->mtu + IBMVETH_BUFF_OH,
0690 adapter->bounce_buffer, adapter->bounce_buffer_dma);
0691
0692 netdev_dbg(netdev, "close complete\n");
0693
0694 return 0;
0695 }
0696
0697 static int ibmveth_set_link_ksettings(struct net_device *dev,
0698 const struct ethtool_link_ksettings *cmd)
0699 {
0700 struct ibmveth_adapter *adapter = netdev_priv(dev);
0701
0702 return ethtool_virtdev_set_link_ksettings(dev, cmd,
0703 &adapter->speed,
0704 &adapter->duplex);
0705 }
0706
0707 static int ibmveth_get_link_ksettings(struct net_device *dev,
0708 struct ethtool_link_ksettings *cmd)
0709 {
0710 struct ibmveth_adapter *adapter = netdev_priv(dev);
0711
0712 cmd->base.speed = adapter->speed;
0713 cmd->base.duplex = adapter->duplex;
0714 cmd->base.port = PORT_OTHER;
0715
0716 return 0;
0717 }
0718
0719 static void ibmveth_init_link_settings(struct net_device *dev)
0720 {
0721 struct ibmveth_adapter *adapter = netdev_priv(dev);
0722
0723 adapter->speed = SPEED_1000;
0724 adapter->duplex = DUPLEX_FULL;
0725 }
0726
0727 static void netdev_get_drvinfo(struct net_device *dev,
0728 struct ethtool_drvinfo *info)
0729 {
0730 strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
0731 strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
0732 }
0733
0734 static netdev_features_t ibmveth_fix_features(struct net_device *dev,
0735 netdev_features_t features)
0736 {
0737
0738
0739
0740
0741
0742
0743
0744
0745
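/* The firmware controls transmit and receive checksum offload with the
 * same attribute (see ibmveth_set_csum_offload), so when RXCSUM is turned
 * off the TX checksum features have to be dropped as well.
 */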
0746 if (!(features & NETIF_F_RXCSUM))
0747 features &= ~NETIF_F_CSUM_MASK;
0748
0749 return features;
0750 }
0751
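/*
 * Enable or disable IPv4/IPv6 TCP checksum offload through the firmware
 * ILLAN attributes, closing and reopening the device if it is running.
 */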
0752 static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
0753 {
0754 struct ibmveth_adapter *adapter = netdev_priv(dev);
0755 unsigned long set_attr, clr_attr, ret_attr;
0756 unsigned long set_attr6, clr_attr6;
0757 long ret, ret4, ret6;
0758 int rc1 = 0, rc2 = 0;
0759 int restart = 0;
0760
0761 if (netif_running(dev)) {
0762 restart = 1;
0763 adapter->pool_config = 1;
0764 ibmveth_close(dev);
0765 adapter->pool_config = 0;
0766 }
0767
0768 set_attr = 0;
0769 clr_attr = 0;
0770 set_attr6 = 0;
0771 clr_attr6 = 0;
0772
0773 if (data) {
0774 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
0775 set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
0776 } else {
0777 clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
0778 clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
0779 }
0780
0781 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
0782
0783 if (ret == H_SUCCESS &&
0784 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
0785 ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
0786 set_attr, &ret_attr);
0787
0788 if (ret4 != H_SUCCESS) {
0789 netdev_err(dev, "unable to change IPv4 checksum "
0790 "offload settings. %d rc=%ld\n",
0791 data, ret4);
0792
0793 h_illan_attributes(adapter->vdev->unit_address,
0794 set_attr, clr_attr, &ret_attr);
0795
0796 if (data == 1)
0797 dev->features &= ~NETIF_F_IP_CSUM;
0798
0799 } else {
0800 adapter->fw_ipv4_csum_support = data;
0801 }
0802
0803 ret6 = h_illan_attributes(adapter->vdev->unit_address,
0804 clr_attr6, set_attr6, &ret_attr);
0805
0806 if (ret6 != H_SUCCESS) {
0807 netdev_err(dev, "unable to change IPv6 checksum "
0808 "offload settings. %d rc=%ld\n",
0809 data, ret6);
0810
0811 h_illan_attributes(adapter->vdev->unit_address,
0812 set_attr6, clr_attr6, &ret_attr);
0813
0814 if (data == 1)
0815 dev->features &= ~NETIF_F_IPV6_CSUM;
0816
0817 } else
0818 adapter->fw_ipv6_csum_support = data;
0819
0820 if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
0821 adapter->rx_csum = data;
0822 else
0823 rc1 = -EIO;
0824 } else {
0825 rc1 = -EIO;
0826 netdev_err(dev, "unable to change checksum offload settings."
0827 " %d rc=%ld ret_attr=%lx\n", data, ret,
0828 ret_attr);
0829 }
0830
0831 if (restart)
0832 rc2 = ibmveth_open(dev);
0833
0834 return rc1 ? rc1 : rc2;
0835 }
0836
0837 static int ibmveth_set_tso(struct net_device *dev, u32 data)
0838 {
0839 struct ibmveth_adapter *adapter = netdev_priv(dev);
0840 unsigned long set_attr, clr_attr, ret_attr;
0841 long ret1, ret2;
0842 int rc1 = 0, rc2 = 0;
0843 int restart = 0;
0844
0845 if (netif_running(dev)) {
0846 restart = 1;
0847 adapter->pool_config = 1;
0848 ibmveth_close(dev);
0849 adapter->pool_config = 0;
0850 }
0851
0852 set_attr = 0;
0853 clr_attr = 0;
0854
0855 if (data)
0856 set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
0857 else
0858 clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
0859
0860 ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
0861
0862 if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
0863 !old_large_send) {
0864 ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
0865 set_attr, &ret_attr);
0866
0867 if (ret2 != H_SUCCESS) {
0868 netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
0869 data, ret2);
0870
0871 h_illan_attributes(adapter->vdev->unit_address,
0872 set_attr, clr_attr, &ret_attr);
0873
0874 if (data == 1)
0875 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
0876 rc1 = -EIO;
0877
0878 } else {
0879 adapter->fw_large_send_support = data;
0880 adapter->large_send = data;
0881 }
0882 } else {
0883
0884
0885
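/* Firmware without the new large-send support, or old_large_send forced:
 * the old method cannot handle TSO6 and only works if every partition
 * runs an updated driver.
 */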
0886 if (data == 1) {
0887 dev->features &= ~NETIF_F_TSO6;
0888 netdev_info(dev, "TSO feature requires all partitions to have updated driver");
0889 }
0890 adapter->large_send = data;
0891 }
0892
0893 if (restart)
0894 rc2 = ibmveth_open(dev);
0895
0896 return rc1 ? rc1 : rc2;
0897 }
0898
0899 static int ibmveth_set_features(struct net_device *dev,
0900 netdev_features_t features)
0901 {
0902 struct ibmveth_adapter *adapter = netdev_priv(dev);
0903 int rx_csum = !!(features & NETIF_F_RXCSUM);
0904 int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
0905 int rc1 = 0, rc2 = 0;
0906
0907 if (rx_csum != adapter->rx_csum) {
0908 rc1 = ibmveth_set_csum_offload(dev, rx_csum);
0909 if (rc1 && !adapter->rx_csum)
0910 dev->features =
0911 features & ~(NETIF_F_CSUM_MASK |
0912 NETIF_F_RXCSUM);
0913 }
0914
0915 if (large_send != adapter->large_send) {
0916 rc2 = ibmveth_set_tso(dev, large_send);
0917 if (rc2 && !adapter->large_send)
0918 dev->features =
0919 features & ~(NETIF_F_TSO | NETIF_F_TSO6);
0920 }
0921
0922 return rc1 ? rc1 : rc2;
0923 }
0924
0925 static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
0926 {
0927 int i;
0928
0929 if (stringset != ETH_SS_STATS)
0930 return;
0931
0932 for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
0933 memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
0934 }
0935
0936 static int ibmveth_get_sset_count(struct net_device *dev, int sset)
0937 {
0938 switch (sset) {
0939 case ETH_SS_STATS:
0940 return ARRAY_SIZE(ibmveth_stats);
0941 default:
0942 return -EOPNOTSUPP;
0943 }
0944 }
0945
0946 static void ibmveth_get_ethtool_stats(struct net_device *dev,
0947 struct ethtool_stats *stats, u64 *data)
0948 {
0949 int i;
0950 struct ibmveth_adapter *adapter = netdev_priv(dev);
0951
0952 for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
0953 data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
0954 }
0955
0956 static const struct ethtool_ops netdev_ethtool_ops = {
0957 .get_drvinfo = netdev_get_drvinfo,
0958 .get_link = ethtool_op_get_link,
0959 .get_strings = ibmveth_get_strings,
0960 .get_sset_count = ibmveth_get_sset_count,
0961 .get_ethtool_stats = ibmveth_get_ethtool_stats,
0962 .get_link_ksettings = ibmveth_get_link_ksettings,
0963 .set_link_ksettings = ibmveth_set_link_ksettings,
0964 };
0965
0966 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
0967 {
0968 return -EOPNOTSUPP;
0969 }
0970
0971 static int ibmveth_send(struct ibmveth_adapter *adapter,
0972 union ibmveth_buf_desc *descs, unsigned long mss)
0973 {
0974 unsigned long correlator;
0975 unsigned int retry_count;
0976 unsigned long ret;
0977
0978
0979
0980
0981
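/* The hypervisor may report H_BUSY transiently; retry the send a bounded
 * number of times before giving up.
 */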
0982 retry_count = 1024;
0983 correlator = 0;
0984 do {
0985 ret = h_send_logical_lan(adapter->vdev->unit_address,
0986 descs[0].desc, descs[1].desc,
0987 descs[2].desc, descs[3].desc,
0988 descs[4].desc, descs[5].desc,
0989 correlator, &correlator, mss,
0990 adapter->fw_large_send_support);
0991 } while ((ret == H_BUSY) && (retry_count--));
0992
0993 if (ret != H_SUCCESS && ret != H_DROPPED) {
0994 netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
0995 "with rc=%ld\n", ret);
0996 return 1;
0997 }
0998
0999 return 0;
1000 }
1001
1002 static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
1003 struct net_device *netdev)
1004 {
1005 struct ethhdr *ether_header;
1006 int ret = 0;
1007
1008 ether_header = eth_hdr(skb);
1009
1010 if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
1011 netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
1012 netdev->stats.tx_dropped++;
1013 ret = -EOPNOTSUPP;
1014 }
1015
1016 return ret;
1017 }
1018
1019 static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
1020 struct net_device *netdev)
1021 {
1022 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1023 unsigned int desc_flags;
1024 union ibmveth_buf_desc descs[6];
1025 int last, i;
1026 int force_bounce = 0;
1027 dma_addr_t dma_addr;
1028 unsigned long mss = 0;
1029
1030 if (ibmveth_is_packet_unsupported(skb, netdev))
1031 goto out;
1032
1033
1034
1035
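/* An active trunk adapter cannot pass on skbs that carry a frag list, so
 * linearize them first and drop the packet if that fails.
 */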
1036 if (adapter->is_active_trunk &&
1037 skb_has_frag_list(skb) && __skb_linearize(skb)) {
1038 netdev->stats.tx_dropped++;
1039 goto out;
1040 }
1041
1042
1043
1044
1045
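/* h_send_logical_lan takes at most six buffer descriptors: one for the
 * linear data plus up to five fragments, so linearize anything bigger.
 */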
1046 if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
1047 netdev->stats.tx_dropped++;
1048 goto out;
1049 }
1050
1051
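/* The firmware can only checksum-offload TCP; checksum any other
 * CHECKSUM_PARTIAL packet in software before sending it.
 */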
1052 if (skb->ip_summed == CHECKSUM_PARTIAL &&
1053 ((skb->protocol == htons(ETH_P_IP) &&
1054 ip_hdr(skb)->protocol != IPPROTO_TCP) ||
1055 (skb->protocol == htons(ETH_P_IPV6) &&
1056 ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
1057 skb_checksum_help(skb)) {
1058
1059 netdev_err(netdev, "tx: failed to checksum packet\n");
1060 netdev->stats.tx_dropped++;
1061 goto out;
1062 }
1063
1064 desc_flags = IBMVETH_BUF_VALID;
1065
1066 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1067 unsigned char *buf = skb_transport_header(skb) +
1068 skb->csum_offset;
1069
1070 desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
1071
1072
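/* Zero out the checksum field itself before handing the frame over. */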
1073 buf[0] = 0;
1074 buf[1] = 0;
1075
1076 if (skb_is_gso(skb) && adapter->fw_large_send_support)
1077 desc_flags |= IBMVETH_BUF_LRG_SND;
1078 }
1079
1080 retry_bounce:
1081 memset(descs, 0, sizeof(descs));
1082
1083
1084
1085
1086
1087
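/* Copy small linear packets (under tx_copybreak) into the preallocated
 * bounce buffer; this avoids mapping and unmapping a fresh DMA buffer for
 * every small packet.
 */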
1088 if (force_bounce || (!skb_is_nonlinear(skb) &&
1089 (skb->len < tx_copybreak))) {
1090 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
1091 skb->len);
1092
1093 descs[0].fields.flags_len = desc_flags | skb->len;
1094 descs[0].fields.address = adapter->bounce_buffer_dma;
1095
1096 if (ibmveth_send(adapter, descs, 0)) {
1097 adapter->tx_send_failed++;
1098 netdev->stats.tx_dropped++;
1099 } else {
1100 netdev->stats.tx_packets++;
1101 netdev->stats.tx_bytes += skb->len;
1102 }
1103
1104 goto out;
1105 }
1106
1107
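/* Map the linear part of the skb. */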
1108 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
1109 skb_headlen(skb), DMA_TO_DEVICE);
1110 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1111 goto map_failed;
1112
1113 descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
1114 descs[0].fields.address = dma_addr;
1115
1116
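/* Map each paged fragment into its own descriptor. */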
1117 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1118 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1119
1120 dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
1121 skb_frag_size(frag), DMA_TO_DEVICE);
1122
1123 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1124 goto map_failed_frags;
1125
1126 descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
1127 descs[i+1].fields.address = dma_addr;
1128 }
1129
1130 if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1131 if (adapter->fw_large_send_support) {
1132 mss = (unsigned long)skb_shinfo(skb)->gso_size;
1133 adapter->tx_large_packets++;
1134 } else if (!skb_is_gso_v6(skb)) {
1135
1136
1137
1138
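/* Old-style large send (IPv4 only): mark the frame by forcing the IP
 * checksum to 0xffff and pass the MSS in the TCP checksum field.
 */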
1139 ip_hdr(skb)->check = 0xffff;
1140 tcp_hdr(skb)->check =
1141 cpu_to_be16(skb_shinfo(skb)->gso_size);
1142 adapter->tx_large_packets++;
1143 }
1144 }
1145
1146 if (ibmveth_send(adapter, descs, mss)) {
1147 adapter->tx_send_failed++;
1148 netdev->stats.tx_dropped++;
1149 } else {
1150 netdev->stats.tx_packets++;
1151 netdev->stats.tx_bytes += skb->len;
1152 }
1153
1154 dma_unmap_single(&adapter->vdev->dev,
1155 descs[0].fields.address,
1156 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1157 DMA_TO_DEVICE);
1158
1159 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
1160 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1161 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1162 DMA_TO_DEVICE);
1163
1164 out:
1165 dev_consume_skb_any(skb);
1166 return NETDEV_TX_OK;
1167
1168 map_failed_frags:
1169 last = i+1;
1170 for (i = 1; i < last; i++)
1171 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1172 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1173 DMA_TO_DEVICE);
1174
1175 dma_unmap_single(&adapter->vdev->dev,
1176 descs[0].fields.address,
1177 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1178 DMA_TO_DEVICE);
1179 map_failed:
1180 if (!firmware_has_feature(FW_FEATURE_CMO))
1181 netdev_err(netdev, "tx: unable to map xmit buffer\n");
1182 adapter->tx_map_failed++;
1183 if (skb_linearize(skb)) {
1184 netdev->stats.tx_dropped++;
1185 goto out;
1186 }
1187 force_bounce = 1;
1188 goto retry_bounce;
1189 }
1190
1191 static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
1192 {
1193 struct tcphdr *tcph;
1194 int offset = 0;
1195 int hdr_len;
1196
1197
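/* Only TCP over IPv4 or IPv6 is aggregated; find the TCP header offset
 * and set the matching GSO type.
 */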
1198 if (skb->protocol == htons(ETH_P_IP)) {
1199 struct iphdr *iph = (struct iphdr *)skb->data;
1200
1201 if (iph->protocol == IPPROTO_TCP) {
1202 offset = iph->ihl * 4;
1203 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1204 } else {
1205 return;
1206 }
1207 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1208 struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
1209
1210 if (iph6->nexthdr == IPPROTO_TCP) {
1211 offset = sizeof(struct ipv6hdr);
1212 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1213 } else {
1214 return;
1215 }
1216 } else {
1217 return;
1218 }
1219
1220
1221
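/* If the firmware did not flag this as a large packet, the sender may
 * have stashed the MSS in the TCP checksum field (the old large-send
 * convention); recover it from there.
 */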
1222 tcph = (struct tcphdr *)(skb->data + offset);
1223 if (lrg_pkt) {
1224 skb_shinfo(skb)->gso_size = mss;
1225 } else if (offset) {
1226 skb_shinfo(skb)->gso_size = ntohs(tcph->check);
1227 tcph->check = 0;
1228 }
1229
1230 if (skb_shinfo(skb)->gso_size) {
1231 hdr_len = offset + tcph->doff * 4;
1232 skb_shinfo(skb)->gso_segs =
1233 DIV_ROUND_UP(skb->len - hdr_len,
1234 skb_shinfo(skb)->gso_size);
1235 }
1236 }
1237
1238 static void ibmveth_rx_csum_helper(struct sk_buff *skb,
1239 struct ibmveth_adapter *adapter)
1240 {
1241 struct iphdr *iph = NULL;
1242 struct ipv6hdr *iph6 = NULL;
1243 __be16 skb_proto = 0;
1244 u16 iphlen = 0;
1245 u16 iph_proto = 0;
1246 u16 tcphdrlen = 0;
1247
1248 skb_proto = be16_to_cpu(skb->protocol);
1249
1250 if (skb_proto == ETH_P_IP) {
1251 iph = (struct iphdr *)skb->data;
1252
1253
1254
1255
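/* Frames sent with large send arrive with the IP checksum forced to
 * 0xffff (see the transmit path), so recompute a valid header checksum.
 */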
1256 if (iph->check == 0xffff) {
1257 iph->check = 0;
1258 iph->check = ip_fast_csum((unsigned char *)iph,
1259 iph->ihl);
1260 }
1261
1262 iphlen = iph->ihl * 4;
1263 iph_proto = iph->protocol;
1264 } else if (skb_proto == ETH_P_IPV6) {
1265 iph6 = (struct ipv6hdr *)skb->data;
1266 iphlen = sizeof(struct ipv6hdr);
1267 iph_proto = iph6->nexthdr;
1268 }
1269
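/* A TCP checksum of zero means the sending partition left checksumming to
 * be offloaded.  Set up CHECKSUM_PARTIAL at the right offset so the stack
 * accepts the frame, and on an active trunk adapter seed the field with
 * the pseudo-header checksum so it can be forwarded correctly.
 */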
1285 if (iph_proto == IPPROTO_TCP) {
1286 struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
1287 if (tcph->check == 0x0000) {
1288
1289 if (adapter->is_active_trunk) {
1290 tcphdrlen = skb->len - iphlen;
1291 if (skb_proto == ETH_P_IP)
1292 tcph->check =
1293 ~csum_tcpudp_magic(iph->saddr,
1294 iph->daddr, tcphdrlen, iph_proto, 0);
1295 else if (skb_proto == ETH_P_IPV6)
1296 tcph->check =
1297 ~csum_ipv6_magic(&iph6->saddr,
1298 &iph6->daddr, tcphdrlen, iph_proto, 0);
1299 }
1300
1301 skb_partial_csum_set(skb, iphlen,
1302 offsetof(struct tcphdr, check));
1303 skb_reset_network_header(skb);
1304 }
1305 }
1306 }
1307
1308 static int ibmveth_poll(struct napi_struct *napi, int budget)
1309 {
1310 struct ibmveth_adapter *adapter =
1311 container_of(napi, struct ibmveth_adapter, napi);
1312 struct net_device *netdev = adapter->netdev;
1313 int frames_processed = 0;
1314 unsigned long lpar_rc;
1315 u16 mss = 0;
1316
1317 while (frames_processed < budget) {
1318 if (!ibmveth_rxq_pending_buffer(adapter))
1319 break;
1320
1321 smp_rmb();
1322 if (!ibmveth_rxq_buffer_valid(adapter)) {
1323 wmb();
1324 adapter->rx_invalid_buffer++;
1325 netdev_dbg(netdev, "recycling invalid buffer\n");
1326 ibmveth_rxq_recycle_buffer(adapter);
1327 } else {
1328 struct sk_buff *skb, *new_skb;
1329 int length = ibmveth_rxq_frame_length(adapter);
1330 int offset = ibmveth_rxq_frame_offset(adapter);
1331 int csum_good = ibmveth_rxq_csum_good(adapter);
1332 int lrg_pkt = ibmveth_rxq_large_packet(adapter);
1333 __sum16 iph_check = 0;
1334
1335 skb = ibmveth_rxq_get_buffer(adapter);
1336
1337
1338
1339
1340
1341
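/* For large (aggregated) packets the firmware writes the MSS as a 64-bit
 * big-endian value starting 8 bytes into the receive buffer.
 */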
1342 if (lrg_pkt) {
1343 __be64 *rxmss = (__be64 *)(skb->data + 8);
1344
1345 mss = (u16)be64_to_cpu(*rxmss);
1346 }
1347
1348 new_skb = NULL;
1349 if (length < rx_copybreak)
1350 new_skb = netdev_alloc_skb(netdev, length);
1351
1352 if (new_skb) {
1353 skb_copy_to_linear_data(new_skb,
1354 skb->data + offset,
1355 length);
1356 if (rx_flush)
1357 ibmveth_flush_buffer(skb->data,
1358 length + offset);
1359 if (!ibmveth_rxq_recycle_buffer(adapter))
1360 kfree_skb(skb);
1361 skb = new_skb;
1362 } else {
1363 ibmveth_rxq_harvest_buffer(adapter);
1364 skb_reserve(skb, offset);
1365 }
1366
1367 skb_put(skb, length);
1368 skb->protocol = eth_type_trans(skb, netdev);
1369
1370
1371
1372
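/* An IP header checksum of 0xffff identifies a frame sent with the old
 * large-send method, so it needs the same GSO fix-up as a large packet.
 */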
1373 if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
1374 struct iphdr *iph = (struct iphdr *)skb->data;
1375
1376 iph_check = iph->check;
1377 }
1378
1379 if ((length > netdev->mtu + ETH_HLEN) ||
1380 lrg_pkt || iph_check == 0xffff) {
1381 ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
1382 adapter->rx_large_packets++;
1383 }
1384
1385 if (csum_good) {
1386 skb->ip_summed = CHECKSUM_UNNECESSARY;
1387 ibmveth_rx_csum_helper(skb, adapter);
1388 }
1389
1390 napi_gro_receive(napi, skb);
1391
1392 netdev->stats.rx_packets++;
1393 netdev->stats.rx_bytes += length;
1394 frames_processed++;
1395 }
1396 }
1397
1398 ibmveth_replenish_task(adapter);
1399
1400 if (frames_processed < budget) {
1401 napi_complete_done(napi, frames_processed);
1402
1403
1404
1405
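/* Re-enable the interrupt, then check the queue once more in case a frame
 * arrived between the last check and enabling the interrupt.
 */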
1406 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1407 VIO_IRQ_ENABLE);
1408
1409 BUG_ON(lpar_rc != H_SUCCESS);
1410
1411 if (ibmveth_rxq_pending_buffer(adapter) &&
1412 napi_reschedule(napi)) {
1413 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1414 VIO_IRQ_DISABLE);
1415 }
1416 }
1417
1418 return frames_processed;
1419 }
1420
1421 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1422 {
1423 struct net_device *netdev = dev_instance;
1424 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1425 unsigned long lpar_rc;
1426
1427 if (napi_schedule_prep(&adapter->napi)) {
1428 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1429 VIO_IRQ_DISABLE);
1430 BUG_ON(lpar_rc != H_SUCCESS);
1431 __napi_schedule(&adapter->napi);
1432 }
1433 return IRQ_HANDLED;
1434 }
1435
1436 static void ibmveth_set_multicast_list(struct net_device *netdev)
1437 {
1438 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1439 unsigned long lpar_rc;
1440
1441 if ((netdev->flags & IFF_PROMISC) ||
1442 (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1443 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1444 IbmVethMcastEnableRecv |
1445 IbmVethMcastDisableFiltering,
1446 0);
1447 if (lpar_rc != H_SUCCESS) {
1448 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1449 "entering promisc mode\n", lpar_rc);
1450 }
1451 } else {
1452 struct netdev_hw_addr *ha;
1453
1454 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1455 IbmVethMcastEnableRecv |
1456 IbmVethMcastDisableFiltering |
1457 IbmVethMcastClearFilterTable,
1458 0);
1459 if (lpar_rc != H_SUCCESS) {
1460 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1461 "attempting to clear filter table\n",
1462 lpar_rc);
1463 }
1464
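/* Add each multicast address to the firmware filter table. */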
1465 netdev_for_each_mc_addr(ha, netdev) {
1466
1467 u64 mcast_addr;
1468 mcast_addr = ether_addr_to_u64(ha->addr);
1469 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1470 IbmVethMcastAddFilter,
1471 mcast_addr);
1472 if (lpar_rc != H_SUCCESS) {
1473 netdev_err(netdev, "h_multicast_ctrl rc=%ld "
1474 "when adding an entry to the filter "
1475 "table\n", lpar_rc);
1476 }
1477 }
1478
1479
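/* Re-enable filtering now that the table is populated. */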
1480 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1481 IbmVethMcastEnableFiltering,
1482 0);
1483 if (lpar_rc != H_SUCCESS) {
1484 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1485 "enabling filtering\n", lpar_rc);
1486 }
1487 }
1488 }
1489
1490 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1491 {
1492 struct ibmveth_adapter *adapter = netdev_priv(dev);
1493 struct vio_dev *viodev = adapter->vdev;
1494 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1495 int i, rc;
1496 int need_restart = 0;
1497
1498 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1499 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
1500 break;
1501
1502 if (i == IBMVETH_NUM_BUFF_POOLS)
1503 return -EINVAL;
1504
1505
1506
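/* The device has to be closed while the buffer pools are reconfigured;
 * reopen it afterwards if it was running.
 */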
1507 if (netif_running(adapter->netdev)) {
1508 need_restart = 1;
1509 adapter->pool_config = 1;
1510 ibmveth_close(adapter->netdev);
1511 adapter->pool_config = 0;
1512 }
1513
1514
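/* Walk the pools, activating each, until one is found whose buffers can
 * hold the new MTU; then switch to it.
 */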
1515 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1516 adapter->rx_buff_pool[i].active = 1;
1517
1518 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
1519 dev->mtu = new_mtu;
1520 vio_cmo_set_dev_desired(viodev,
1521 ibmveth_get_desired_dma
1522 (viodev));
1523 if (need_restart) {
1524 return ibmveth_open(adapter->netdev);
1525 }
1526 return 0;
1527 }
1528 }
1529
1530 if (need_restart && (rc = ibmveth_open(adapter->netdev)))
1531 return rc;
1532
1533 return -EINVAL;
1534 }
1535
1536 #ifdef CONFIG_NET_POLL_CONTROLLER
1537 static void ibmveth_poll_controller(struct net_device *dev)
1538 {
1539 ibmveth_replenish_task(netdev_priv(dev));
1540 ibmveth_interrupt(dev->irq, dev);
1541 }
1542 #endif
1543
1544
1545
1546
1547
1548
1549
1550
1551
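/**
 * ibmveth_get_desired_dma - estimate the IO memory the device wants
 * @vdev: the vio device whose desired IO memory is queried
 *
 * Returns the number of bytes of IO-mapped memory the driver expects to
 * use: the buffer and filter lists, a bounce buffer sized to the MTU, the
 * active receive buffer pools and the receive queue itself.
 */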
1552 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1553 {
1554 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1555 struct ibmveth_adapter *adapter;
1556 struct iommu_table *tbl;
1557 unsigned long ret;
1558 int i;
1559 int rxqentries = 1;
1560
1561 tbl = get_iommu_table_base(&vdev->dev);
1562
1563
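/* Before probe completes there is no netdev yet; fall back to the default
 * IO entitlement.
 */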
1564 if (netdev == NULL)
1565 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1566
1567 adapter = netdev_priv(netdev);
1568
1569 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1570 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1571
1572 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1573
1574 if (adapter->rx_buff_pool[i].active)
1575 ret +=
1576 adapter->rx_buff_pool[i].size *
1577 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1578 buff_size, tbl);
1579 rxqentries += adapter->rx_buff_pool[i].size;
1580 }
1581
1582 ret += IOMMU_PAGE_ALIGN(
1583 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1584
1585 return ret;
1586 }
1587
1588 static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1589 {
1590 struct ibmveth_adapter *adapter = netdev_priv(dev);
1591 struct sockaddr *addr = p;
1592 u64 mac_address;
1593 int rc;
1594
1595 if (!is_valid_ether_addr(addr->sa_data))
1596 return -EADDRNOTAVAIL;
1597
1598 mac_address = ether_addr_to_u64(addr->sa_data);
1599 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1600 if (rc) {
1601 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1602 return rc;
1603 }
1604
1605 eth_hw_addr_set(dev, addr->sa_data);
1606
1607 return 0;
1608 }
1609
1610 static const struct net_device_ops ibmveth_netdev_ops = {
1611 .ndo_open = ibmveth_open,
1612 .ndo_stop = ibmveth_close,
1613 .ndo_start_xmit = ibmveth_start_xmit,
1614 .ndo_set_rx_mode = ibmveth_set_multicast_list,
1615 .ndo_eth_ioctl = ibmveth_ioctl,
1616 .ndo_change_mtu = ibmveth_change_mtu,
1617 .ndo_fix_features = ibmveth_fix_features,
1618 .ndo_set_features = ibmveth_set_features,
1619 .ndo_validate_addr = eth_validate_addr,
1620 .ndo_set_mac_address = ibmveth_set_mac_addr,
1621 #ifdef CONFIG_NET_POLL_CONTROLLER
1622 .ndo_poll_controller = ibmveth_poll_controller,
1623 #endif
1624 };
1625
1626 static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1627 {
1628 int rc, i, mac_len;
1629 struct net_device *netdev;
1630 struct ibmveth_adapter *adapter;
1631 unsigned char *mac_addr_p;
1632 __be32 *mcastFilterSize_p;
1633 long ret;
1634 unsigned long ret_attr;
1635
1636 dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1637 dev->unit_address);
1638
1639 mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1640 &mac_len);
1641 if (!mac_addr_p) {
1642 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1643 return -EINVAL;
1644 }
1645
1646 if (mac_len == 8)
1647 mac_addr_p += 2;
1648 else if (mac_len != 6) {
1649 dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
1650 mac_len);
1651 return -EINVAL;
1652 }
1653
1654 mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
1655 VETH_MCAST_FILTER_SIZE,
1656 NULL);
1657 if (!mcastFilterSize_p) {
1658 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1659 "attribute\n");
1660 return -EINVAL;
1661 }
1662
1663 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1664
1665 if (!netdev)
1666 return -ENOMEM;
1667
1668 adapter = netdev_priv(netdev);
1669 dev_set_drvdata(&dev->dev, netdev);
1670
1671 adapter->vdev = dev;
1672 adapter->netdev = netdev;
1673 adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1674 adapter->pool_config = 0;
1675 ibmveth_init_link_settings(netdev);
1676
1677 netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16);
1678
1679 netdev->irq = dev->irq;
1680 netdev->netdev_ops = &ibmveth_netdev_ops;
1681 netdev->ethtool_ops = &netdev_ethtool_ops;
1682 SET_NETDEV_DEV(netdev, &dev->dev);
1683 netdev->hw_features = NETIF_F_SG;
1684 if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1685 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1686 NETIF_F_RXCSUM;
1687 }
1688
1689 netdev->features |= netdev->hw_features;
1690
1691 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
1692
1693
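/* Enable TSO and TSO6 by default only when the firmware supports the new
 * large-send method and old_large_send is not forced; otherwise TSO is
 * merely advertised in hw_features.
 */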
1694 if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
1695 !old_large_send) {
1696 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1697 netdev->features |= netdev->hw_features;
1698 } else {
1699 netdev->hw_features |= NETIF_F_TSO;
1700 }
1701
1702 adapter->is_active_trunk = false;
1703 if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) {
1704 adapter->is_active_trunk = true;
1705 netdev->hw_features |= NETIF_F_FRAGLIST;
1706 netdev->features |= NETIF_F_FRAGLIST;
1707 }
1708
1709 netdev->min_mtu = IBMVETH_MIN_MTU;
1710 netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
1711
1712 eth_hw_addr_set(netdev, mac_addr_p);
1713
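/* Under Cooperative Memory Overcommitment use the CMO-specific default
 * pool counts.
 */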
1714 if (firmware_has_feature(FW_FEATURE_CMO))
1715 memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
1716
1717 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1718 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1719 int error;
1720
1721 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1722 pool_count[i], pool_size[i],
1723 pool_active[i]);
1724 error = kobject_init_and_add(kobj, &ktype_veth_pool,
1725 &dev->dev.kobj, "pool%d", i);
1726 if (!error)
1727 kobject_uevent(kobj, KOBJ_ADD);
1728 }
1729
1730 netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1731 netdev_dbg(netdev, "registering netdev...\n");
1732
1733 ibmveth_set_features(netdev, netdev->features);
1734
1735 rc = register_netdev(netdev);
1736
1737 if (rc) {
1738 netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1739 free_netdev(netdev);
1740 return rc;
1741 }
1742
1743 netdev_dbg(netdev, "registered\n");
1744
1745 return 0;
1746 }
1747
1748 static void ibmveth_remove(struct vio_dev *dev)
1749 {
1750 struct net_device *netdev = dev_get_drvdata(&dev->dev);
1751 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1752 int i;
1753
1754 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1755 kobject_put(&adapter->rx_buff_pool[i].kobj);
1756
1757 unregister_netdev(netdev);
1758
1759 free_netdev(netdev);
1760 dev_set_drvdata(&dev->dev, NULL);
1761 }
1762
1763 static struct attribute veth_active_attr;
1764 static struct attribute veth_num_attr;
1765 static struct attribute veth_size_attr;
1766
1767 static ssize_t veth_pool_show(struct kobject *kobj,
1768 struct attribute *attr, char *buf)
1769 {
1770 struct ibmveth_buff_pool *pool = container_of(kobj,
1771 struct ibmveth_buff_pool,
1772 kobj);
1773
1774 if (attr == &veth_active_attr)
1775 return sprintf(buf, "%d\n", pool->active);
1776 else if (attr == &veth_num_attr)
1777 return sprintf(buf, "%d\n", pool->size);
1778 else if (attr == &veth_size_attr)
1779 return sprintf(buf, "%d\n", pool->buff_size);
1780 return 0;
1781 }
1782
1783 static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1784 const char *buf, size_t count)
1785 {
1786 struct ibmveth_buff_pool *pool = container_of(kobj,
1787 struct ibmveth_buff_pool,
1788 kobj);
1789 struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
1790 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1791 long value = simple_strtol(buf, NULL, 10);
1792 long rc;
1793
1794 if (attr == &veth_active_attr) {
1795 if (value && !pool->active) {
1796 if (netif_running(netdev)) {
1797 if (ibmveth_alloc_buffer_pool(pool)) {
1798 netdev_err(netdev,
1799 "unable to alloc pool\n");
1800 return -ENOMEM;
1801 }
1802 pool->active = 1;
1803 adapter->pool_config = 1;
1804 ibmveth_close(netdev);
1805 adapter->pool_config = 0;
1806 if ((rc = ibmveth_open(netdev)))
1807 return rc;
1808 } else {
1809 pool->active = 1;
1810 }
1811 } else if (!value && pool->active) {
1812 int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1813 int i;
1814
1815
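/* Before deactivating this pool, make sure another active pool can still
 * hold an MTU-sized packet.
 */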
1816 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1817 if (pool == &adapter->rx_buff_pool[i])
1818 continue;
1819 if (!adapter->rx_buff_pool[i].active)
1820 continue;
1821 if (mtu <= adapter->rx_buff_pool[i].buff_size)
1822 break;
1823 }
1824
1825 if (i == IBMVETH_NUM_BUFF_POOLS) {
1826 netdev_err(netdev, "no active pool >= MTU\n");
1827 return -EPERM;
1828 }
1829
1830 if (netif_running(netdev)) {
1831 adapter->pool_config = 1;
1832 ibmveth_close(netdev);
1833 pool->active = 0;
1834 adapter->pool_config = 0;
1835 if ((rc = ibmveth_open(netdev)))
1836 return rc;
1837 }
1838 pool->active = 0;
1839 }
1840 } else if (attr == &veth_num_attr) {
1841 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
1842 return -EINVAL;
1843 } else {
1844 if (netif_running(netdev)) {
1845 adapter->pool_config = 1;
1846 ibmveth_close(netdev);
1847 adapter->pool_config = 0;
1848 pool->size = value;
1849 if ((rc = ibmveth_open(netdev)))
1850 return rc;
1851 } else {
1852 pool->size = value;
1853 }
1854 }
1855 } else if (attr == &veth_size_attr) {
1856 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
1857 return -EINVAL;
1858 } else {
1859 if (netif_running(netdev)) {
1860 adapter->pool_config = 1;
1861 ibmveth_close(netdev);
1862 adapter->pool_config = 0;
1863 pool->buff_size = value;
1864 if ((rc = ibmveth_open(netdev)))
1865 return rc;
1866 } else {
1867 pool->buff_size = value;
1868 }
1869 }
1870 }
1871
1872
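/* Kick the interrupt handler so the new settings take effect and buffers
 * get replenished.
 */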
1873 ibmveth_interrupt(netdev->irq, netdev);
1874 return count;
1875 }
1876
1877
1878 #define ATTR(_name, _mode) \
1879 struct attribute veth_##_name##_attr = { \
1880 .name = __stringify(_name), .mode = _mode, \
1881 };
1882
1883 static ATTR(active, 0644);
1884 static ATTR(num, 0644);
1885 static ATTR(size, 0644);
1886
1887 static struct attribute *veth_pool_attrs[] = {
1888 &veth_active_attr,
1889 &veth_num_attr,
1890 &veth_size_attr,
1891 NULL,
1892 };
1893 ATTRIBUTE_GROUPS(veth_pool);
1894
1895 static const struct sysfs_ops veth_pool_ops = {
1896 .show = veth_pool_show,
1897 .store = veth_pool_store,
1898 };
1899
1900 static struct kobj_type ktype_veth_pool = {
1901 .release = NULL,
1902 .sysfs_ops = &veth_pool_ops,
1903 .default_groups = veth_pool_groups,
1904 };
1905
1906 static int ibmveth_resume(struct device *dev)
1907 {
1908 struct net_device *netdev = dev_get_drvdata(dev);
1909 ibmveth_interrupt(netdev->irq, netdev);
1910 return 0;
1911 }
1912
1913 static const struct vio_device_id ibmveth_device_table[] = {
1914 { "network", "IBM,l-lan"},
1915 { "", "" }
1916 };
1917 MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1918
1919 static const struct dev_pm_ops ibmveth_pm_ops = {
1920 .resume = ibmveth_resume
1921 };
1922
1923 static struct vio_driver ibmveth_driver = {
1924 .id_table = ibmveth_device_table,
1925 .probe = ibmveth_probe,
1926 .remove = ibmveth_remove,
1927 .get_desired_dma = ibmveth_get_desired_dma,
1928 .name = ibmveth_driver_name,
1929 .pm = &ibmveth_pm_ops,
1930 };
1931
1932 static int __init ibmveth_module_init(void)
1933 {
1934 printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1935 ibmveth_driver_string, ibmveth_driver_version);
1936
1937 return vio_register_driver(&ibmveth_driver);
1938 }
1939
1940 static void __exit ibmveth_module_exit(void)
1941 {
1942 vio_unregister_driver(&ibmveth_driver);
1943 }
1944
1945 module_init(ibmveth_module_init);
1946 module_exit(ibmveth_module_exit);