/* IBM System i/p Virtual NIC Device Driver */

0035 #include <linux/module.h>
0036 #include <linux/moduleparam.h>
0037 #include <linux/types.h>
0038 #include <linux/errno.h>
0039 #include <linux/completion.h>
0040 #include <linux/ioport.h>
0041 #include <linux/dma-mapping.h>
0042 #include <linux/kernel.h>
0043 #include <linux/netdevice.h>
0044 #include <linux/etherdevice.h>
0045 #include <linux/skbuff.h>
0046 #include <linux/init.h>
0047 #include <linux/delay.h>
0048 #include <linux/mm.h>
0049 #include <linux/ethtool.h>
0050 #include <linux/proc_fs.h>
0051 #include <linux/if_arp.h>
0052 #include <linux/in.h>
0053 #include <linux/ip.h>
0054 #include <linux/ipv6.h>
0055 #include <linux/irq.h>
0056 #include <linux/irqdomain.h>
0057 #include <linux/kthread.h>
0058 #include <linux/seq_file.h>
0059 #include <linux/interrupt.h>
0060 #include <net/net_namespace.h>
0061 #include <asm/hvcall.h>
0062 #include <linux/atomic.h>
0063 #include <asm/vio.h>
0064 #include <asm/xive.h>
0065 #include <asm/iommu.h>
0066 #include <linux/uaccess.h>
0067 #include <asm/firmware.h>
0068 #include <linux/workqueue.h>
0069 #include <linux/if_vlan.h>
0070 #include <linux/utsname.h>
0071
0072 #include "ibmvnic.h"
0073
0074 static const char ibmvnic_driver_name[] = "ibmvnic";
0075 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
0076
0077 MODULE_AUTHOR("Santiago Leon");
0078 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
0079 MODULE_LICENSE("GPL");
0080 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
0081
0082 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
0083 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
0084 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
0085 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
0086 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
0087 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
0088 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
0089 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
0090 static int enable_scrq_irq(struct ibmvnic_adapter *,
0091 struct ibmvnic_sub_crq_queue *);
0092 static int disable_scrq_irq(struct ibmvnic_adapter *,
0093 struct ibmvnic_sub_crq_queue *);
0094 static int pending_scrq(struct ibmvnic_adapter *,
0095 struct ibmvnic_sub_crq_queue *);
0096 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
0097 struct ibmvnic_sub_crq_queue *);
0098 static int ibmvnic_poll(struct napi_struct *napi, int data);
0099 static void send_query_map(struct ibmvnic_adapter *adapter);
0100 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
0101 static int send_request_unmap(struct ibmvnic_adapter *, u8);
0102 static int send_login(struct ibmvnic_adapter *adapter);
0103 static void send_query_cap(struct ibmvnic_adapter *adapter);
0104 static int init_sub_crqs(struct ibmvnic_adapter *);
0105 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
0106 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
0107 static void release_crq_queue(struct ibmvnic_adapter *);
0108 static int __ibmvnic_set_mac(struct net_device *, u8 *);
0109 static int init_crq_queue(struct ibmvnic_adapter *adapter);
0110 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
0111 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
0112 struct ibmvnic_sub_crq_queue *tx_scrq);
0113 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
0114 struct ibmvnic_long_term_buff *ltb);
0115 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
0116
0117 struct ibmvnic_stat {
0118 char name[ETH_GSTRING_LEN];
0119 int offset;
0120 };
0121
0122 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
0123 offsetof(struct ibmvnic_statistics, stat))
0124 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
0125
0126 static const struct ibmvnic_stat ibmvnic_stats[] = {
0127 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
0128 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
0129 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
0130 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
0131 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
0132 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
0133 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
0134 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
0135 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
0136 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
0137 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
0138 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
0139 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
0140 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
0141 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
0142 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
0143 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
0144 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
0145 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
0146 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
0147 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
0148 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
0149 };
0150
0151 static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
0152 {
0153 union ibmvnic_crq crq;
0154
0155 memset(&crq, 0, sizeof(crq));
0156 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
0157 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
0158
0159 return ibmvnic_send_crq(adapter, &crq);
0160 }
0161
0162 static int send_version_xchg(struct ibmvnic_adapter *adapter)
0163 {
0164 union ibmvnic_crq crq;
0165
0166 memset(&crq, 0, sizeof(crq));
0167 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
0168 crq.version_exchange.cmd = VERSION_EXCHANGE;
0169 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
0170
0171 return ibmvnic_send_crq(adapter, &crq);
0172 }
0173
0174 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
0175 unsigned long length, unsigned long *number,
0176 unsigned long *irq)
0177 {
0178 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
0179 long rc;
0180
0181 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
0182 *number = retbuf[0];
0183 *irq = retbuf[1];
0184
0185 return rc;
0186 }
0187
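/**
 * ibmvnic_wait_for_completion - wait for a firmware completion with a timeout
 * @adapter:   private adapter data
 * @comp_done: completion to wait on
 * @timeout:   total time to wait, in milliseconds
 *
 * The wait is split into several shorter intervals so that a dead CRQ
 * (device gone) is noticed between intervals rather than only after the
 * full timeout has expired.
 */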
0197 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
0198 struct completion *comp_done,
0199 unsigned long timeout)
0200 {
0201 struct net_device *netdev;
0202 unsigned long div_timeout;
0203 u8 retry;
0204
0205 netdev = adapter->netdev;
0206 retry = 5;
0207 div_timeout = msecs_to_jiffies(timeout / retry);
0208 while (true) {
0209 if (!adapter->crq.active) {
0210 netdev_err(netdev, "Device down!\n");
0211 return -ENODEV;
0212 }
0213 if (!retry--)
0214 break;
0215 if (wait_for_completion_timeout(comp_done, div_timeout))
0216 return 0;
0217 }
0218 netdev_err(netdev, "Operation timed out.\n");
0219 return -ETIMEDOUT;
0220 }
0221
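/**
 * reuse_ltb() - check whether a long term buffer (LTB) can be reused
 * @ltb:  the long term buffer to check
 * @size: the required buffer size
 *
 * An already-allocated LTB is reusable as long as its size is unchanged.
 */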
0231 static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
0232 {
0233 return (ltb->buff && ltb->size == size);
0234 }
0235
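/**
 * alloc_long_term_buff() - allocate a long term buffer (LTB) of @size bytes
 * @adapter: ibmvnic adapter owning the LTB
 * @ltb:     container object for the LTB
 * @size:    requested size in bytes
 *
 * Reuse the existing DMA buffer if it already has the requested size,
 * otherwise free it and allocate a new one. In either case the buffer is
 * registered with firmware via send_request_map() and the function waits
 * for the map response. On a mapping error the buffer itself is kept so a
 * later call can retry with it.
 *
 * Return: 0 on success, a negative errno otherwise.
 */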
0257 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
0258 struct ibmvnic_long_term_buff *ltb, int size)
0259 {
0260 struct device *dev = &adapter->vdev->dev;
0261 u64 prev = 0;
0262 int rc;
0263
0264 if (!reuse_ltb(ltb, size)) {
0265 dev_dbg(dev,
0266 "LTB size changed from 0x%llx to 0x%x, reallocating\n",
0267 ltb->size, size);
0268 prev = ltb->size;
0269 free_long_term_buff(adapter, ltb);
0270 }
0271
0272 if (ltb->buff) {
0273 dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
0274 ltb->map_id, ltb->size);
0275 } else {
0276 ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
0277 GFP_KERNEL);
0278 if (!ltb->buff) {
0279 dev_err(dev, "Couldn't alloc long term buffer\n");
0280 return -ENOMEM;
0281 }
0282 ltb->size = size;
0283
0284 ltb->map_id = find_first_zero_bit(adapter->map_ids,
0285 MAX_MAP_ID);
0286 bitmap_set(adapter->map_ids, ltb->map_id, 1);
0287
0288 dev_dbg(dev,
0289 "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
0290 ltb->map_id, ltb->size, prev);
0291 }
0292
0293
0294 memset(ltb->buff, 0, ltb->size);
0295
0296 mutex_lock(&adapter->fw_lock);
0297 adapter->fw_done_rc = 0;
0298 reinit_completion(&adapter->fw_done);
0299
0300 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
0301 if (rc) {
0302 dev_err(dev, "send_request_map failed, rc = %d\n", rc);
0303 goto out;
0304 }
0305
0306 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
0307 if (rc) {
0308 dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
0309 rc);
0310 goto out;
0311 }
0312
0313 if (adapter->fw_done_rc) {
0314 dev_err(dev, "Couldn't map LTB, rc = %d\n",
0315 adapter->fw_done_rc);
0316 rc = -EIO;
0317 goto out;
0318 }
0319 rc = 0;
0320 out:
0321
0322 mutex_unlock(&adapter->fw_lock);
0323 return rc;
0324 }
0325
0326 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
0327 struct ibmvnic_long_term_buff *ltb)
0328 {
0329 struct device *dev = &adapter->vdev->dev;
0330
0331 if (!ltb->buff)
0332 return;
0333
0334
0335
0336
0337
0338 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
0339 adapter->reset_reason != VNIC_RESET_MOBILITY &&
0340 adapter->reset_reason != VNIC_RESET_TIMEOUT)
0341 send_request_unmap(adapter, ltb->map_id);
0342
0343 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
0344
0345 ltb->buff = NULL;
0346
0347 bitmap_clear(adapter->map_ids, ltb->map_id, 1);
0348 ltb->map_id = 0;
0349 }
0350
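/**
 * free_ltb_set() - free every long term buffer in an LTB set
 * @adapter: ibmvnic adapter owning the set
 * @ltb_set: the set of LTBs to free
 */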
0359 static void free_ltb_set(struct ibmvnic_adapter *adapter,
0360 struct ibmvnic_ltb_set *ltb_set)
0361 {
0362 int i;
0363
0364 for (i = 0; i < ltb_set->num_ltbs; i++)
0365 free_long_term_buff(adapter, &ltb_set->ltbs[i]);
0366
0367 kfree(ltb_set->ltbs);
0368 ltb_set->ltbs = NULL;
0369 ltb_set->num_ltbs = 0;
0370 }
0371
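/**
 * alloc_ltb_set() - allocate a set of long term buffers (LTBs)
 * @adapter:   ibmvnic adapter owning the set
 * @ltb_set:   the set to (re)allocate
 * @num_buffs: number of buffers the set must hold
 * @buff_size: size of each buffer in bytes
 *
 * Each LTB is sized to the largest multiple of @buff_size that fits in
 * IBMVNIC_ONE_LTB_SIZE, and enough LTBs are allocated to cover @num_buffs
 * buffers. LTBs carried over from the previous set that already have the
 * right size are reused rather than reallocated.
 *
 * Return: 0 on success, a negative errno otherwise.
 */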
0389 static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
0390 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
0391 int buff_size)
0392 {
0393 struct device *dev = &adapter->vdev->dev;
0394 struct ibmvnic_ltb_set old_set;
0395 struct ibmvnic_ltb_set new_set;
0396 int rem_size;
0397 int tot_size;
0398 int ltb_size;
0399 int nltbs;
0400 int rc;
0401 int n;
0402 int i;
0403
0404 dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
0405 buff_size);
0406
0407 ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
0408 tot_size = num_buffs * buff_size;
0409
0410 if (ltb_size > tot_size)
0411 ltb_size = tot_size;
0412
0413 nltbs = tot_size / ltb_size;
0414 if (tot_size % ltb_size)
0415 nltbs++;
0416
0417 old_set = *ltb_set;
0418
0419 if (old_set.num_ltbs == nltbs) {
0420 new_set = old_set;
0421 } else {
0422 int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
0423
0424 new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
0425 if (!new_set.ltbs)
0426 return -ENOMEM;
0427
0428 new_set.num_ltbs = nltbs;
0429
0430
0431 for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
0432 free_long_term_buff(adapter, &old_set.ltbs[i]);
0433
0434
0435
0436
0437
0438 n = min(old_set.num_ltbs, new_set.num_ltbs);
0439 for (i = 0; i < n; i++)
0440 new_set.ltbs[i] = old_set.ltbs[i];
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451 kfree(old_set.ltbs);
0452 old_set.ltbs = NULL;
0453 old_set.num_ltbs = 0;
0454
0455
0456
0457
0458 *ltb_set = new_set;
0459 }
0460
0461 i = 0;
0462 rem_size = tot_size;
0463 while (rem_size) {
0464 if (ltb_size > rem_size)
0465 ltb_size = rem_size;
0466
0467 rem_size -= ltb_size;
0468
0469 rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
0470 if (rc)
0471 goto out;
0472 i++;
0473 }
0474
0475 WARN_ON(i != new_set.num_ltbs);
0476
0477 return 0;
0478 out:
0479
0480
0481
0482 return rc;
0483 }
0484
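/**
 * map_rxpool_buf_to_ltb() - look up the LTB backing an rx pool buffer
 * @rxpool: rx pool containing the buffer
 * @bufidx: index of the buffer within the pool
 * @ltbp:   set to the LTB that holds the buffer
 * @offset: set to the buffer's offset within that LTB
 */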
0497 static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
0498 unsigned int bufidx,
0499 struct ibmvnic_long_term_buff **ltbp,
0500 unsigned int *offset)
0501 {
0502 struct ibmvnic_long_term_buff *ltb;
0503 int nbufs;
0504 int i;
0505
0506 WARN_ON(bufidx >= rxpool->size);
0507
0508 for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
0509 ltb = &rxpool->ltb_set.ltbs[i];
0510 nbufs = ltb->size / rxpool->buff_size;
0511 if (bufidx < nbufs)
0512 break;
0513 bufidx -= nbufs;
0514 }
0515
0516 *ltbp = ltb;
0517 *offset = bufidx * rxpool->buff_size;
0518 }
0519
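/**
 * map_txpool_buf_to_ltb() - look up the LTB backing a tx pool buffer
 * @txpool: tx pool containing the buffer
 * @bufidx: index of the buffer within the pool
 * @ltbp:   set to the LTB that holds the buffer
 * @offset: set to the buffer's offset within that LTB
 */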
0530 static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
0531 unsigned int bufidx,
0532 struct ibmvnic_long_term_buff **ltbp,
0533 unsigned int *offset)
0534 {
0535 struct ibmvnic_long_term_buff *ltb;
0536 int nbufs;
0537 int i;
0538
0539 WARN_ON_ONCE(bufidx >= txpool->num_buffers);
0540
0541 for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
0542 ltb = &txpool->ltb_set.ltbs[i];
0543 nbufs = ltb->size / txpool->buf_size;
0544 if (bufidx < nbufs)
0545 break;
0546 bufidx -= nbufs;
0547 }
0548
0549 *ltbp = ltb;
0550 *offset = bufidx * txpool->buf_size;
0551 }
0552
0553 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
0554 {
0555 int i;
0556
0557 for (i = 0; i < adapter->num_active_rx_pools; i++)
0558 adapter->rx_pool[i].active = 0;
0559 }
0560
0561 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
0562 struct ibmvnic_rx_pool *pool)
0563 {
0564 int count = pool->size - atomic_read(&pool->available);
0565 u64 handle = adapter->rx_scrq[pool->index]->handle;
0566 struct device *dev = &adapter->vdev->dev;
0567 struct ibmvnic_ind_xmit_queue *ind_bufp;
0568 struct ibmvnic_sub_crq_queue *rx_scrq;
0569 struct ibmvnic_long_term_buff *ltb;
0570 union sub_crq *sub_crq;
0571 int buffers_added = 0;
0572 unsigned long lpar_rc;
0573 struct sk_buff *skb;
0574 unsigned int offset;
0575 dma_addr_t dma_addr;
0576 unsigned char *dst;
0577 int shift = 0;
0578 int bufidx;
0579 int i;
0580
0581 if (!pool->active)
0582 return;
0583
0584 rx_scrq = adapter->rx_scrq[pool->index];
0585 ind_bufp = &rx_scrq->ind_buf;
0586
0587
0588
0589
0590
0591
0592
0593 for (i = ind_bufp->index; i < count; ++i) {
0594 bufidx = pool->free_map[pool->next_free];
0595
0596
0597
0598
0599
0600
0601 skb = pool->rx_buff[bufidx].skb;
0602 if (!skb) {
0603 skb = netdev_alloc_skb(adapter->netdev,
0604 pool->buff_size);
0605 if (!skb) {
0606 dev_err(dev, "Couldn't replenish rx buff\n");
0607 adapter->replenish_no_mem++;
0608 break;
0609 }
0610 }
0611
0612 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
0613 pool->next_free = (pool->next_free + 1) % pool->size;
0614
0615
0616 map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
0617 dst = ltb->buff + offset;
0618 memset(dst, 0, pool->buff_size);
0619 dma_addr = ltb->addr + offset;
0620
0621
0622 pool->rx_buff[bufidx].data = dst;
0623 pool->rx_buff[bufidx].dma = dma_addr;
0624 pool->rx_buff[bufidx].skb = skb;
0625 pool->rx_buff[bufidx].pool_index = pool->index;
0626 pool->rx_buff[bufidx].size = pool->buff_size;
0627
0628
0629 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
0630 memset(sub_crq, 0, sizeof(*sub_crq));
0631 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
0632 sub_crq->rx_add.correlator =
0633 cpu_to_be64((u64)&pool->rx_buff[bufidx]);
0634 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
0635 sub_crq->rx_add.map_id = ltb->map_id;
0636
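/* The sub-CRQ length field is only 24 bits wide, so on little-endian
 * hosts the buffer size is shifted up by one byte before the big-endian
 * conversion to keep the low-order byte from being dropped.
 */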
0642 #ifdef __LITTLE_ENDIAN__
0643 shift = 8;
0644 #endif
0645 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
0646
0647
0648 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
0649 i == count - 1) {
0650 lpar_rc =
0651 send_subcrq_indirect(adapter, handle,
0652 (u64)ind_bufp->indir_dma,
0653 (u64)ind_bufp->index);
0654 if (lpar_rc != H_SUCCESS)
0655 goto failure;
0656 buffers_added += ind_bufp->index;
0657 adapter->replenish_add_buff_success += ind_bufp->index;
0658 ind_bufp->index = 0;
0659 }
0660 }
0661 atomic_add(buffers_added, &pool->available);
0662 return;
0663
0664 failure:
0665 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
0666 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
0667 for (i = ind_bufp->index - 1; i >= 0; --i) {
0668 struct ibmvnic_rx_buff *rx_buff;
0669
0670 pool->next_free = pool->next_free == 0 ?
0671 pool->size - 1 : pool->next_free - 1;
0672 sub_crq = &ind_bufp->indir_arr[i];
0673 rx_buff = (struct ibmvnic_rx_buff *)
0674 be64_to_cpu(sub_crq->rx_add.correlator);
0675 bufidx = (int)(rx_buff - pool->rx_buff);
0676 pool->free_map[pool->next_free] = bufidx;
0677 dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
0678 pool->rx_buff[bufidx].skb = NULL;
0679 }
0680 adapter->replenish_add_buff_failure += ind_bufp->index;
0681 atomic_add(buffers_added, &pool->available);
0682 ind_bufp->index = 0;
0683 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
0684
0685
0686
0687
0688
0689 deactivate_rx_pools(adapter);
0690 netif_carrier_off(adapter->netdev);
0691 }
0692 }
0693
0694 static void replenish_pools(struct ibmvnic_adapter *adapter)
0695 {
0696 int i;
0697
0698 adapter->replenish_task_cycles++;
0699 for (i = 0; i < adapter->num_active_rx_pools; i++) {
0700 if (adapter->rx_pool[i].active)
0701 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
0702 }
0703
0704 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
0705 }
0706
0707 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
0708 {
0709 kfree(adapter->tx_stats_buffers);
0710 kfree(adapter->rx_stats_buffers);
0711 adapter->tx_stats_buffers = NULL;
0712 adapter->rx_stats_buffers = NULL;
0713 }
0714
0715 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
0716 {
0717 adapter->tx_stats_buffers =
0718 kcalloc(IBMVNIC_MAX_QUEUES,
0719 sizeof(struct ibmvnic_tx_queue_stats),
0720 GFP_KERNEL);
0721 if (!adapter->tx_stats_buffers)
0722 return -ENOMEM;
0723
0724 adapter->rx_stats_buffers =
0725 kcalloc(IBMVNIC_MAX_QUEUES,
0726 sizeof(struct ibmvnic_rx_queue_stats),
0727 GFP_KERNEL);
0728 if (!adapter->rx_stats_buffers)
0729 return -ENOMEM;
0730
0731 return 0;
0732 }
0733
0734 static void release_stats_token(struct ibmvnic_adapter *adapter)
0735 {
0736 struct device *dev = &adapter->vdev->dev;
0737
0738 if (!adapter->stats_token)
0739 return;
0740
0741 dma_unmap_single(dev, adapter->stats_token,
0742 sizeof(struct ibmvnic_statistics),
0743 DMA_FROM_DEVICE);
0744 adapter->stats_token = 0;
0745 }
0746
0747 static int init_stats_token(struct ibmvnic_adapter *adapter)
0748 {
0749 struct device *dev = &adapter->vdev->dev;
0750 dma_addr_t stok;
0751 int rc;
0752
0753 stok = dma_map_single(dev, &adapter->stats,
0754 sizeof(struct ibmvnic_statistics),
0755 DMA_FROM_DEVICE);
0756 rc = dma_mapping_error(dev, stok);
0757 if (rc) {
0758 dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
0759 return rc;
0760 }
0761
0762 adapter->stats_token = stok;
0763 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
0764 return 0;
0765 }
0766
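/**
 * release_rx_pools() - release all rx pools attached to @adapter
 * @adapter: ibmvnic adapter
 *
 * Safe to call repeatedly; it returns immediately if no pools are attached.
 */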
0773 static void release_rx_pools(struct ibmvnic_adapter *adapter)
0774 {
0775 struct ibmvnic_rx_pool *rx_pool;
0776 int i, j;
0777
0778 if (!adapter->rx_pool)
0779 return;
0780
0781 for (i = 0; i < adapter->num_active_rx_pools; i++) {
0782 rx_pool = &adapter->rx_pool[i];
0783
0784 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
0785
0786 kfree(rx_pool->free_map);
0787
0788 free_ltb_set(adapter, &rx_pool->ltb_set);
0789
0790 if (!rx_pool->rx_buff)
0791 continue;
0792
0793 for (j = 0; j < rx_pool->size; j++) {
0794 if (rx_pool->rx_buff[j].skb) {
0795 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
0796 rx_pool->rx_buff[j].skb = NULL;
0797 }
0798 }
0799
0800 kfree(rx_pool->rx_buff);
0801 }
0802
0803 kfree(adapter->rx_pool);
0804 adapter->rx_pool = NULL;
0805 adapter->num_active_rx_pools = 0;
0806 adapter->prev_rx_pool_size = 0;
0807 }
0808
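/**
 * reuse_rx_pools() - check whether the existing rx pools can be reused
 * @adapter: ibmvnic adapter
 *
 * The pools can only be reused if the pool count, pool size and buffer
 * size all match the parameters negotiated for the new session.
 */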
0823 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
0824 {
0825 u64 old_num_pools, new_num_pools;
0826 u64 old_pool_size, new_pool_size;
0827 u64 old_buff_size, new_buff_size;
0828
0829 if (!adapter->rx_pool)
0830 return false;
0831
0832 old_num_pools = adapter->num_active_rx_pools;
0833 new_num_pools = adapter->req_rx_queues;
0834
0835 old_pool_size = adapter->prev_rx_pool_size;
0836 new_pool_size = adapter->req_rx_add_entries_per_subcrq;
0837
0838 old_buff_size = adapter->prev_rx_buf_sz;
0839 new_buff_size = adapter->cur_rx_buf_sz;
0840
0841 if (old_buff_size != new_buff_size ||
0842 old_num_pools != new_num_pools ||
0843 old_pool_size != new_pool_size)
0844 return false;
0845
0846 return true;
0847 }
0848
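/**
 * init_rx_pools() - initialize the adapter's rx buffer pools
 * @netdev: net device owning the pools
 *
 * Existing pools are reused when their parameters still match; otherwise
 * they are released and a new set is allocated. In both cases the long
 * term buffers backing each pool are (re)allocated and the free map and
 * counters are reset.
 *
 * Return: 0 on success, a negative errno otherwise.
 */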
0860 static int init_rx_pools(struct net_device *netdev)
0861 {
0862 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
0863 struct device *dev = &adapter->vdev->dev;
0864 struct ibmvnic_rx_pool *rx_pool;
0865 u64 num_pools;
0866 u64 pool_size;
0867 u64 buff_size;
0868 int i, j, rc;
0869
0870 pool_size = adapter->req_rx_add_entries_per_subcrq;
0871 num_pools = adapter->req_rx_queues;
0872 buff_size = adapter->cur_rx_buf_sz;
0873
0874 if (reuse_rx_pools(adapter)) {
0875 dev_dbg(dev, "Reusing rx pools\n");
0876 goto update_ltb;
0877 }
0878
0879
0880 release_rx_pools(adapter);
0881
0882 adapter->rx_pool = kcalloc(num_pools,
0883 sizeof(struct ibmvnic_rx_pool),
0884 GFP_KERNEL);
0885 if (!adapter->rx_pool) {
0886 dev_err(dev, "Failed to allocate rx pools\n");
0887 return -ENOMEM;
0888 }
0889
0890
0891
0892
0893 adapter->num_active_rx_pools = num_pools;
0894
0895 for (i = 0; i < num_pools; i++) {
0896 rx_pool = &adapter->rx_pool[i];
0897
0898 netdev_dbg(adapter->netdev,
0899 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
0900 i, pool_size, buff_size);
0901
0902 rx_pool->size = pool_size;
0903 rx_pool->index = i;
0904 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
0905
0906 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
0907 GFP_KERNEL);
0908 if (!rx_pool->free_map) {
0909 dev_err(dev, "Couldn't alloc free_map %d\n", i);
0910 rc = -ENOMEM;
0911 goto out_release;
0912 }
0913
0914 rx_pool->rx_buff = kcalloc(rx_pool->size,
0915 sizeof(struct ibmvnic_rx_buff),
0916 GFP_KERNEL);
0917 if (!rx_pool->rx_buff) {
0918 dev_err(dev, "Couldn't alloc rx buffers\n");
0919 rc = -ENOMEM;
0920 goto out_release;
0921 }
0922 }
0923
0924 adapter->prev_rx_pool_size = pool_size;
0925 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
0926
0927 update_ltb:
0928 for (i = 0; i < num_pools; i++) {
0929 rx_pool = &adapter->rx_pool[i];
0930 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
0931 i, rx_pool->size, rx_pool->buff_size);
0932
0933 rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
0934 rx_pool->size, rx_pool->buff_size);
0935 if (rc)
0936 goto out;
0937
0938 for (j = 0; j < rx_pool->size; ++j) {
0939 struct ibmvnic_rx_buff *rx_buff;
0940
0941 rx_pool->free_map[j] = j;
0942
0943
0944
0945
0946
0947 rx_buff = &rx_pool->rx_buff[j];
0948 rx_buff->dma = 0;
0949 rx_buff->data = 0;
0950 rx_buff->size = 0;
0951 rx_buff->pool_index = 0;
0952 }
0953
0954
0955
0956
0957 atomic_set(&rx_pool->available, 0);
0958 rx_pool->next_alloc = 0;
0959 rx_pool->next_free = 0;
0960
0961
0962
0963 rx_pool->active = 1;
0964 }
0965 return 0;
0966 out_release:
0967 release_rx_pools(adapter);
0968 out:
0969
0970
0971
0972 return rc;
0973 }
0974
0975 static void release_vpd_data(struct ibmvnic_adapter *adapter)
0976 {
0977 if (!adapter->vpd)
0978 return;
0979
0980 kfree(adapter->vpd->buff);
0981 kfree(adapter->vpd);
0982
0983 adapter->vpd = NULL;
0984 }
0985
0986 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
0987 struct ibmvnic_tx_pool *tx_pool)
0988 {
0989 kfree(tx_pool->tx_buff);
0990 kfree(tx_pool->free_map);
0991 free_ltb_set(adapter, &tx_pool->ltb_set);
0992 }
0993
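/**
 * release_tx_pools() - release all tx and tso pools attached to @adapter
 * @adapter: ibmvnic adapter
 *
 * Safe to call repeatedly; it returns immediately if no pools are attached.
 */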
1000 static void release_tx_pools(struct ibmvnic_adapter *adapter)
1001 {
1002 int i;
1003
1004
1005
1006
1007 if (!adapter->tx_pool)
1008 return;
1009
1010 for (i = 0; i < adapter->num_active_tx_pools; i++) {
1011 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
1012 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
1013 }
1014
1015 kfree(adapter->tx_pool);
1016 adapter->tx_pool = NULL;
1017 kfree(adapter->tso_pool);
1018 adapter->tso_pool = NULL;
1019 adapter->num_active_tx_pools = 0;
1020 adapter->prev_tx_pool_size = 0;
1021 }
1022
1023 static int init_one_tx_pool(struct net_device *netdev,
1024 struct ibmvnic_tx_pool *tx_pool,
1025 int pool_size, int buf_size)
1026 {
1027 int i;
1028
1029 tx_pool->tx_buff = kcalloc(pool_size,
1030 sizeof(struct ibmvnic_tx_buff),
1031 GFP_KERNEL);
1032 if (!tx_pool->tx_buff)
1033 return -ENOMEM;
1034
1035 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
1036 if (!tx_pool->free_map) {
1037 kfree(tx_pool->tx_buff);
1038 tx_pool->tx_buff = NULL;
1039 return -ENOMEM;
1040 }
1041
1042 for (i = 0; i < pool_size; i++)
1043 tx_pool->free_map[i] = i;
1044
1045 tx_pool->consumer_index = 0;
1046 tx_pool->producer_index = 0;
1047 tx_pool->num_buffers = pool_size;
1048 tx_pool->buf_size = buf_size;
1049
1050 return 0;
1051 }
1052
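/**
 * reuse_tx_pools() - check whether the existing tx pools can be reused
 * @adapter: ibmvnic adapter
 *
 * The pools can only be reused if the pool count, pool size and MTU all
 * match the parameters negotiated for the new session.
 */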
1066 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
1067 {
1068 u64 old_num_pools, new_num_pools;
1069 u64 old_pool_size, new_pool_size;
1070 u64 old_mtu, new_mtu;
1071
1072 if (!adapter->tx_pool)
1073 return false;
1074
1075 old_num_pools = adapter->num_active_tx_pools;
1076 new_num_pools = adapter->num_active_tx_scrqs;
1077 old_pool_size = adapter->prev_tx_pool_size;
1078 new_pool_size = adapter->req_tx_entries_per_subcrq;
1079 old_mtu = adapter->prev_mtu;
1080 new_mtu = adapter->req_mtu;
1081
1082 if (old_mtu != new_mtu ||
1083 old_num_pools != new_num_pools ||
1084 old_pool_size != new_pool_size)
1085 return false;
1086
1087 return true;
1088 }
1089
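/**
 * init_tx_pools() - initialize the adapter's tx and tso buffer pools
 * @netdev: net device owning the pools
 *
 * Existing pools are reused when their parameters still match; otherwise
 * they are released and a new set is allocated. In both cases the long
 * term buffers backing each pool are (re)allocated and the free maps and
 * producer/consumer indexes are reset.
 *
 * Return: 0 on success, a negative errno otherwise.
 */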
1101 static int init_tx_pools(struct net_device *netdev)
1102 {
1103 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1104 struct device *dev = &adapter->vdev->dev;
1105 int num_pools;
1106 u64 pool_size;
1107 u64 buff_size;
1108 int i, j, rc;
1109
1110 num_pools = adapter->req_tx_queues;
1111
1112
1113
1114
1115
1116 if (reuse_tx_pools(adapter)) {
1117 netdev_dbg(netdev, "Reusing tx pools\n");
1118 goto update_ltb;
1119 }
1120
1121
1122 release_tx_pools(adapter);
1123
1124 pool_size = adapter->req_tx_entries_per_subcrq;
1125 num_pools = adapter->num_active_tx_scrqs;
1126
1127 adapter->tx_pool = kcalloc(num_pools,
1128 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1129 if (!adapter->tx_pool)
1130 return -ENOMEM;
1131
1132 adapter->tso_pool = kcalloc(num_pools,
1133 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
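/* Keep ->tx_pool and ->tso_pool either both allocated or both NULL so
 * that release_tx_pools() can treat them uniformly.
 */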
1137 if (!adapter->tso_pool) {
1138 kfree(adapter->tx_pool);
1139 adapter->tx_pool = NULL;
1140 return -ENOMEM;
1141 }
1142
1143
1144
1145
1146 adapter->num_active_tx_pools = num_pools;
1147
1148 buff_size = adapter->req_mtu + VLAN_HLEN;
1149 buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
1150
1151 for (i = 0; i < num_pools; i++) {
1152 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
1153 i, adapter->req_tx_entries_per_subcrq, buff_size);
1154
1155 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
1156 pool_size, buff_size);
1157 if (rc)
1158 goto out_release;
1159
1160 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
1161 IBMVNIC_TSO_BUFS,
1162 IBMVNIC_TSO_BUF_SZ);
1163 if (rc)
1164 goto out_release;
1165 }
1166
1167 adapter->prev_tx_pool_size = pool_size;
1168 adapter->prev_mtu = adapter->req_mtu;
1169
1170 update_ltb:
1171
1172
1173
1174
1175
1176
1177 rc = -1;
1178 for (i = 0; i < num_pools; i++) {
1179 struct ibmvnic_tx_pool *tso_pool;
1180 struct ibmvnic_tx_pool *tx_pool;
1181
1182 tx_pool = &adapter->tx_pool[i];
1183
1184 dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
1185 i, tx_pool->num_buffers, tx_pool->buf_size);
1186
1187 rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
1188 tx_pool->num_buffers, tx_pool->buf_size);
1189 if (rc)
1190 goto out;
1191
1192 tx_pool->consumer_index = 0;
1193 tx_pool->producer_index = 0;
1194
1195 for (j = 0; j < tx_pool->num_buffers; j++)
1196 tx_pool->free_map[j] = j;
1197
1198 tso_pool = &adapter->tso_pool[i];
1199
1200 dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
1201 i, tso_pool->num_buffers, tso_pool->buf_size);
1202
1203 rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
1204 tso_pool->num_buffers, tso_pool->buf_size);
1205 if (rc)
1206 goto out;
1207
1208 tso_pool->consumer_index = 0;
1209 tso_pool->producer_index = 0;
1210
1211 for (j = 0; j < tso_pool->num_buffers; j++)
1212 tso_pool->free_map[j] = j;
1213 }
1214
1215 return 0;
1216 out_release:
1217 release_tx_pools(adapter);
1218 out:
1219
1220
1221
1222 return rc;
1223 }
1224
1225 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
1226 {
1227 int i;
1228
1229 if (adapter->napi_enabled)
1230 return;
1231
1232 for (i = 0; i < adapter->req_rx_queues; i++)
1233 napi_enable(&adapter->napi[i]);
1234
1235 adapter->napi_enabled = true;
1236 }
1237
1238 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
1239 {
1240 int i;
1241
1242 if (!adapter->napi_enabled)
1243 return;
1244
1245 for (i = 0; i < adapter->req_rx_queues; i++) {
1246 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
1247 napi_disable(&adapter->napi[i]);
1248 }
1249
1250 adapter->napi_enabled = false;
1251 }
1252
1253 static int init_napi(struct ibmvnic_adapter *adapter)
1254 {
1255 int i;
1256
1257 adapter->napi = kcalloc(adapter->req_rx_queues,
1258 sizeof(struct napi_struct), GFP_KERNEL);
1259 if (!adapter->napi)
1260 return -ENOMEM;
1261
1262 for (i = 0; i < adapter->req_rx_queues; i++) {
1263 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
1264 netif_napi_add(adapter->netdev, &adapter->napi[i],
1265 ibmvnic_poll, NAPI_POLL_WEIGHT);
1266 }
1267
1268 adapter->num_active_rx_napi = adapter->req_rx_queues;
1269 return 0;
1270 }
1271
1272 static void release_napi(struct ibmvnic_adapter *adapter)
1273 {
1274 int i;
1275
1276 if (!adapter->napi)
1277 return;
1278
1279 for (i = 0; i < adapter->num_active_rx_napi; i++) {
1280 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
1281 netif_napi_del(&adapter->napi[i]);
1282 }
1283
1284 kfree(adapter->napi);
1285 adapter->napi = NULL;
1286 adapter->num_active_rx_napi = 0;
1287 adapter->napi_enabled = false;
1288 }
1289
1290 static const char *adapter_state_to_string(enum vnic_state state)
1291 {
1292 switch (state) {
1293 case VNIC_PROBING:
1294 return "PROBING";
1295 case VNIC_PROBED:
1296 return "PROBED";
1297 case VNIC_OPENING:
1298 return "OPENING";
1299 case VNIC_OPEN:
1300 return "OPEN";
1301 case VNIC_CLOSING:
1302 return "CLOSING";
1303 case VNIC_CLOSED:
1304 return "CLOSED";
1305 case VNIC_REMOVING:
1306 return "REMOVING";
1307 case VNIC_REMOVED:
1308 return "REMOVED";
1309 case VNIC_DOWN:
1310 return "DOWN";
1311 }
1312 return "UNKNOWN";
1313 }
1314
1315 static int ibmvnic_login(struct net_device *netdev)
1316 {
1317 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1318 unsigned long timeout = msecs_to_jiffies(20000);
1319 int retry_count = 0;
1320 int retries = 10;
1321 bool retry;
1322 int rc;
1323
1324 do {
1325 retry = false;
1326 if (retry_count > retries) {
1327 netdev_warn(netdev, "Login attempts exceeded\n");
1328 return -EACCES;
1329 }
1330
1331 adapter->init_done_rc = 0;
1332 reinit_completion(&adapter->init_done);
1333 rc = send_login(adapter);
1334 if (rc)
1335 return rc;
1336
1337 if (!wait_for_completion_timeout(&adapter->init_done,
1338 timeout)) {
1339 netdev_warn(netdev, "Login timed out, retrying...\n");
1340 retry = true;
1341 adapter->init_done_rc = 0;
1342 retry_count++;
1343 continue;
1344 }
1345
1346 if (adapter->init_done_rc == ABORTED) {
1347 netdev_warn(netdev, "Login aborted, retrying...\n");
1348 retry = true;
1349 adapter->init_done_rc = 0;
1350 retry_count++;
1351
1352
1353
1354 msleep(500);
1355 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
1356 retry_count++;
1357 release_sub_crqs(adapter, 1);
1358
1359 retry = true;
1360 netdev_dbg(netdev,
1361 "Received partial success, retrying...\n");
1362 adapter->init_done_rc = 0;
1363 reinit_completion(&adapter->init_done);
1364 send_query_cap(adapter);
1365 if (!wait_for_completion_timeout(&adapter->init_done,
1366 timeout)) {
1367 netdev_warn(netdev,
1368 "Capabilities query timed out\n");
1369 return -ETIMEDOUT;
1370 }
1371
1372 rc = init_sub_crqs(adapter);
1373 if (rc) {
1374 netdev_warn(netdev,
1375 "SCRQ initialization failed\n");
1376 return rc;
1377 }
1378
1379 rc = init_sub_crq_irqs(adapter);
1380 if (rc) {
1381 netdev_warn(netdev,
1382 "SCRQ irq initialization failed\n");
1383 return rc;
1384 }
1385 } else if (adapter->init_done_rc) {
1386 netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
1387 adapter->init_done_rc);
1388 return -EIO;
1389 }
1390 } while (retry);
1391
1392 __ibmvnic_set_mac(netdev, adapter->mac_addr);
1393
1394 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
1395 return 0;
1396 }
1397
1398 static void release_login_buffer(struct ibmvnic_adapter *adapter)
1399 {
1400 kfree(adapter->login_buf);
1401 adapter->login_buf = NULL;
1402 }
1403
1404 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1405 {
1406 kfree(adapter->login_rsp_buf);
1407 adapter->login_rsp_buf = NULL;
1408 }
1409
1410 static void release_resources(struct ibmvnic_adapter *adapter)
1411 {
1412 release_vpd_data(adapter);
1413
1414 release_napi(adapter);
1415 release_login_buffer(adapter);
1416 release_login_rsp_buffer(adapter);
1417 }
1418
1419 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1420 {
1421 struct net_device *netdev = adapter->netdev;
1422 unsigned long timeout = msecs_to_jiffies(20000);
1423 union ibmvnic_crq crq;
1424 bool resend;
1425 int rc;
1426
1427 netdev_dbg(netdev, "setting link state %d\n", link_state);
1428
1429 memset(&crq, 0, sizeof(crq));
1430 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1431 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1432 crq.logical_link_state.link_state = link_state;
1433
1434 do {
1435 resend = false;
1436
1437 reinit_completion(&adapter->init_done);
1438 rc = ibmvnic_send_crq(adapter, &crq);
1439 if (rc) {
1440 netdev_err(netdev, "Failed to set link state\n");
1441 return rc;
1442 }
1443
1444 if (!wait_for_completion_timeout(&adapter->init_done,
1445 timeout)) {
1446 netdev_err(netdev, "timeout setting link state\n");
1447 return -ETIMEDOUT;
1448 }
1449
1450 if (adapter->init_done_rc == PARTIALSUCCESS) {
1451
1452 mdelay(1000);
1453 resend = true;
1454 } else if (adapter->init_done_rc) {
1455 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1456 adapter->init_done_rc);
1457 return adapter->init_done_rc;
1458 }
1459 } while (resend);
1460
1461 return 0;
1462 }
1463
1464 static int set_real_num_queues(struct net_device *netdev)
1465 {
1466 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1467 int rc;
1468
1469 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1470 adapter->req_tx_queues, adapter->req_rx_queues);
1471
1472 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1473 if (rc) {
1474 netdev_err(netdev, "failed to set the number of tx queues\n");
1475 return rc;
1476 }
1477
1478 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1479 if (rc)
1480 netdev_err(netdev, "failed to set the number of rx queues\n");
1481
1482 return rc;
1483 }
1484
1485 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1486 {
1487 struct device *dev = &adapter->vdev->dev;
1488 union ibmvnic_crq crq;
1489 int len = 0;
1490 int rc;
1491
1492 if (adapter->vpd->buff)
1493 len = adapter->vpd->len;
1494
1495 mutex_lock(&adapter->fw_lock);
1496 adapter->fw_done_rc = 0;
1497 reinit_completion(&adapter->fw_done);
1498
1499 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1500 crq.get_vpd_size.cmd = GET_VPD_SIZE;
1501 rc = ibmvnic_send_crq(adapter, &crq);
1502 if (rc) {
1503 mutex_unlock(&adapter->fw_lock);
1504 return rc;
1505 }
1506
1507 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1508 if (rc) {
1509 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1510 mutex_unlock(&adapter->fw_lock);
1511 return rc;
1512 }
1513 mutex_unlock(&adapter->fw_lock);
1514
1515 if (!adapter->vpd->len)
1516 return -ENODATA;
1517
1518 if (!adapter->vpd->buff)
1519 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1520 else if (adapter->vpd->len != len)
1521 adapter->vpd->buff =
1522 krealloc(adapter->vpd->buff,
1523 adapter->vpd->len, GFP_KERNEL);
1524
1525 if (!adapter->vpd->buff) {
1526 dev_err(dev, "Could allocate VPD buffer\n");
1527 return -ENOMEM;
1528 }
1529
1530 adapter->vpd->dma_addr =
1531 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1532 DMA_FROM_DEVICE);
1533 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1534 dev_err(dev, "Could not map VPD buffer\n");
1535 kfree(adapter->vpd->buff);
1536 adapter->vpd->buff = NULL;
1537 return -ENOMEM;
1538 }
1539
1540 mutex_lock(&adapter->fw_lock);
1541 adapter->fw_done_rc = 0;
1542 reinit_completion(&adapter->fw_done);
1543
1544 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1545 crq.get_vpd.cmd = GET_VPD;
1546 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1547 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1548 rc = ibmvnic_send_crq(adapter, &crq);
1549 if (rc) {
1550 kfree(adapter->vpd->buff);
1551 adapter->vpd->buff = NULL;
1552 mutex_unlock(&adapter->fw_lock);
1553 return rc;
1554 }
1555
1556 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1557 if (rc) {
1558 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1559 kfree(adapter->vpd->buff);
1560 adapter->vpd->buff = NULL;
1561 mutex_unlock(&adapter->fw_lock);
1562 return rc;
1563 }
1564
1565 mutex_unlock(&adapter->fw_lock);
1566 return 0;
1567 }
1568
1569 static int init_resources(struct ibmvnic_adapter *adapter)
1570 {
1571 struct net_device *netdev = adapter->netdev;
1572 int rc;
1573
1574 rc = set_real_num_queues(netdev);
1575 if (rc)
1576 return rc;
1577
1578 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1579 if (!adapter->vpd)
1580 return -ENOMEM;
1581
1582
1583 rc = ibmvnic_get_vpd(adapter);
1584 if (rc) {
1585 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1586 return rc;
1587 }
1588
1589 rc = init_napi(adapter);
1590 if (rc)
1591 return rc;
1592
1593 send_query_map(adapter);
1594
1595 rc = init_rx_pools(netdev);
1596 if (rc)
1597 return rc;
1598
1599 rc = init_tx_pools(netdev);
1600 return rc;
1601 }
1602
1603 static int __ibmvnic_open(struct net_device *netdev)
1604 {
1605 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1606 enum vnic_state prev_state = adapter->state;
1607 int i, rc;
1608
1609 adapter->state = VNIC_OPENING;
1610 replenish_pools(adapter);
1611 ibmvnic_napi_enable(adapter);
1612
1613
1614
1615
1616 for (i = 0; i < adapter->req_rx_queues; i++) {
1617 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1618 if (prev_state == VNIC_CLOSED)
1619 enable_irq(adapter->rx_scrq[i]->irq);
1620 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1621 }
1622
1623 for (i = 0; i < adapter->req_tx_queues; i++) {
1624 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1625 if (prev_state == VNIC_CLOSED)
1626 enable_irq(adapter->tx_scrq[i]->irq);
1627 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1628 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
1629 }
1630
1631 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1632 if (rc) {
1633 ibmvnic_napi_disable(adapter);
1634 ibmvnic_disable_irqs(adapter);
1635 return rc;
1636 }
1637
1638 adapter->tx_queues_active = true;
1639
1640
1641
1642
1643
1644
1645 synchronize_rcu();
1646
1647 netif_tx_start_all_queues(netdev);
1648
1649 if (prev_state == VNIC_CLOSED) {
1650 for (i = 0; i < adapter->req_rx_queues; i++)
1651 napi_schedule(&adapter->napi[i]);
1652 }
1653
1654 adapter->state = VNIC_OPEN;
1655 return rc;
1656 }
1657
1658 static int ibmvnic_open(struct net_device *netdev)
1659 {
1660 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1661 int rc;
1662
1663 ASSERT_RTNL();
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
1677 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1678 adapter_state_to_string(adapter->state),
1679 adapter->failover_pending);
1680 adapter->state = VNIC_OPEN;
1681 rc = 0;
1682 goto out;
1683 }
1684
1685 if (adapter->state != VNIC_CLOSED) {
1686 rc = ibmvnic_login(netdev);
1687 if (rc)
1688 goto out;
1689
1690 rc = init_resources(adapter);
1691 if (rc) {
1692 netdev_err(netdev, "failed to initialize resources\n");
1693 goto out;
1694 }
1695 }
1696
1697 rc = __ibmvnic_open(netdev);
1698
1699 out:
1700
1701
1702
1703
1704 if (rc &&
1705 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1706 adapter->state = VNIC_OPEN;
1707 rc = 0;
1708 }
1709
1710 if (rc) {
1711 release_resources(adapter);
1712 release_rx_pools(adapter);
1713 release_tx_pools(adapter);
1714 }
1715
1716 return rc;
1717 }
1718
1719 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1720 {
1721 struct ibmvnic_rx_pool *rx_pool;
1722 struct ibmvnic_rx_buff *rx_buff;
1723 u64 rx_entries;
1724 int rx_scrqs;
1725 int i, j;
1726
1727 if (!adapter->rx_pool)
1728 return;
1729
1730 rx_scrqs = adapter->num_active_rx_pools;
1731 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1732
1733
1734 for (i = 0; i < rx_scrqs; i++) {
1735 rx_pool = &adapter->rx_pool[i];
1736 if (!rx_pool || !rx_pool->rx_buff)
1737 continue;
1738
1739 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1740 for (j = 0; j < rx_entries; j++) {
1741 rx_buff = &rx_pool->rx_buff[j];
1742 if (rx_buff && rx_buff->skb) {
1743 dev_kfree_skb_any(rx_buff->skb);
1744 rx_buff->skb = NULL;
1745 }
1746 }
1747 }
1748 }
1749
1750 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1751 struct ibmvnic_tx_pool *tx_pool)
1752 {
1753 struct ibmvnic_tx_buff *tx_buff;
1754 u64 tx_entries;
1755 int i;
1756
1757 if (!tx_pool || !tx_pool->tx_buff)
1758 return;
1759
1760 tx_entries = tx_pool->num_buffers;
1761
1762 for (i = 0; i < tx_entries; i++) {
1763 tx_buff = &tx_pool->tx_buff[i];
1764 if (tx_buff && tx_buff->skb) {
1765 dev_kfree_skb_any(tx_buff->skb);
1766 tx_buff->skb = NULL;
1767 }
1768 }
1769 }
1770
1771 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1772 {
1773 int tx_scrqs;
1774 int i;
1775
1776 if (!adapter->tx_pool || !adapter->tso_pool)
1777 return;
1778
1779 tx_scrqs = adapter->num_active_tx_pools;
1780
1781
1782 for (i = 0; i < tx_scrqs; i++) {
1783 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1784 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1785 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1786 }
1787 }
1788
1789 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1790 {
1791 struct net_device *netdev = adapter->netdev;
1792 int i;
1793
1794 if (adapter->tx_scrq) {
1795 for (i = 0; i < adapter->req_tx_queues; i++)
1796 if (adapter->tx_scrq[i]->irq) {
1797 netdev_dbg(netdev,
1798 "Disabling tx_scrq[%d] irq\n", i);
1799 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1800 disable_irq(adapter->tx_scrq[i]->irq);
1801 }
1802 }
1803
1804 if (adapter->rx_scrq) {
1805 for (i = 0; i < adapter->req_rx_queues; i++) {
1806 if (adapter->rx_scrq[i]->irq) {
1807 netdev_dbg(netdev,
1808 "Disabling rx_scrq[%d] irq\n", i);
1809 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1810 disable_irq(adapter->rx_scrq[i]->irq);
1811 }
1812 }
1813 }
1814 }
1815
1816 static void ibmvnic_cleanup(struct net_device *netdev)
1817 {
1818 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1819
1820
1821
1822 adapter->tx_queues_active = false;
1823
1824
1825
1826
1827 synchronize_rcu();
1828
1829 if (test_bit(0, &adapter->resetting))
1830 netif_tx_disable(netdev);
1831 else
1832 netif_tx_stop_all_queues(netdev);
1833
1834 ibmvnic_napi_disable(adapter);
1835 ibmvnic_disable_irqs(adapter);
1836 }
1837
1838 static int __ibmvnic_close(struct net_device *netdev)
1839 {
1840 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1841 int rc = 0;
1842
1843 adapter->state = VNIC_CLOSING;
1844 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1845 adapter->state = VNIC_CLOSED;
1846 return rc;
1847 }
1848
1849 static int ibmvnic_close(struct net_device *netdev)
1850 {
1851 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1852 int rc;
1853
1854 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
1855 adapter_state_to_string(adapter->state),
1856 adapter->failover_pending,
1857 adapter->force_reset_recovery);
1858
1859
1860
1861
1862 if (adapter->failover_pending) {
1863 adapter->state = VNIC_CLOSED;
1864 return 0;
1865 }
1866
1867 rc = __ibmvnic_close(netdev);
1868 ibmvnic_cleanup(netdev);
1869 clean_rx_pools(adapter);
1870 clean_tx_pools(adapter);
1871
1872 return rc;
1873 }
1874
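/**
 * build_hdr_data() - copy the L2/L3/L4 headers requested by firmware
 * @hdr_field: bit field selecting which headers are needed
 * @skb:       socket buffer being transmitted
 * @hdr_len:   filled with the individual header lengths
 * @hdr_data:  buffer the selected headers are copied into
 *
 * Return: total number of header bytes copied into @hdr_data.
 */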
1886 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1887 int *hdr_len, u8 *hdr_data)
1888 {
1889 int len = 0;
1890 u8 *hdr;
1891
1892 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1893 hdr_len[0] = sizeof(struct vlan_ethhdr);
1894 else
1895 hdr_len[0] = sizeof(struct ethhdr);
1896
1897 if (skb->protocol == htons(ETH_P_IP)) {
1898 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1899 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1900 hdr_len[2] = tcp_hdrlen(skb);
1901 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1902 hdr_len[2] = sizeof(struct udphdr);
1903 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1904 hdr_len[1] = sizeof(struct ipv6hdr);
1905 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1906 hdr_len[2] = tcp_hdrlen(skb);
1907 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1908 hdr_len[2] = sizeof(struct udphdr);
1909 } else if (skb->protocol == htons(ETH_P_ARP)) {
1910 hdr_len[1] = arp_hdr_len(skb->dev);
1911 hdr_len[2] = 0;
1912 }
1913
1914 memset(hdr_data, 0, 120);
1915 if ((hdr_field >> 6) & 1) {
1916 hdr = skb_mac_header(skb);
1917 memcpy(hdr_data, hdr, hdr_len[0]);
1918 len += hdr_len[0];
1919 }
1920
1921 if ((hdr_field >> 5) & 1) {
1922 hdr = skb_network_header(skb);
1923 memcpy(hdr_data + len, hdr, hdr_len[1]);
1924 len += hdr_len[1];
1925 }
1926
1927 if ((hdr_field >> 4) & 1) {
1928 hdr = skb_transport_header(skb);
1929 memcpy(hdr_data + len, hdr, hdr_len[2]);
1930 len += hdr_len[2];
1931 }
1932 return len;
1933 }
1934
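/**
 * create_hdr_descs() - pack header bytes into header descriptors
 * @hdr_field: bit field selecting which headers are present
 * @hdr_data:  header bytes produced by build_hdr_data()
 * @len:       total length of @hdr_data
 * @hdr_len:   individual header lengths
 * @scrq_arr:  descriptor array to fill
 *
 * The first descriptor carries up to 24 bytes of header data plus the
 * per-layer header lengths; each extension descriptor that follows carries
 * up to 29 more bytes.
 *
 * Return: number of descriptors written to @scrq_arr.
 */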
1947 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1948 union sub_crq *scrq_arr)
1949 {
1950 union sub_crq hdr_desc;
1951 int tmp_len = len;
1952 int num_descs = 0;
1953 u8 *data, *cur;
1954 int tmp;
1955
1956 while (tmp_len > 0) {
1957 cur = hdr_data + len - tmp_len;
1958
1959 memset(&hdr_desc, 0, sizeof(hdr_desc));
1960 if (cur != hdr_data) {
1961 data = hdr_desc.hdr_ext.data;
1962 tmp = tmp_len > 29 ? 29 : tmp_len;
1963 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1964 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1965 hdr_desc.hdr_ext.len = tmp;
1966 } else {
1967 data = hdr_desc.hdr.data;
1968 tmp = tmp_len > 24 ? 24 : tmp_len;
1969 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1970 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1971 hdr_desc.hdr.len = tmp;
1972 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1973 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1974 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1975 hdr_desc.hdr.flag = hdr_field << 1;
1976 }
1977 memcpy(data, cur, tmp);
1978 tmp_len -= tmp;
1979 *scrq_arr = hdr_desc;
1980 scrq_arr++;
1981 num_descs++;
1982 }
1983
1984 return num_descs;
1985 }
1986
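/**
 * build_hdr_descs_arr() - add header descriptors to an indirect buffer
 * @skb:         socket buffer being transmitted
 * @indir_arr:   indirect descriptor array (entry 0 is the main tx descriptor)
 * @num_entries: incremented by the number of header descriptors added
 * @hdr_field:   bit field selecting which headers are needed
 */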
1998 static void build_hdr_descs_arr(struct sk_buff *skb,
1999 union sub_crq *indir_arr,
2000 int *num_entries, u8 hdr_field)
2001 {
2002 int hdr_len[3] = {0, 0, 0};
2003 u8 hdr_data[140] = {0};
2004 int tot_len;
2005
2006 tot_len = build_hdr_data(hdr_field, skb, hdr_len,
2007 hdr_data);
2008 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
2009 indir_arr + 1);
2010 }
2011
2012 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
2013 struct net_device *netdev)
2014 {
2015
2016
2017
2018
2019
2020
2021 if (skb->len < netdev->min_mtu)
2022 return skb_put_padto(skb, netdev->min_mtu);
2023
2024 return 0;
2025 }
2026
2027 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
2028 struct ibmvnic_sub_crq_queue *tx_scrq)
2029 {
2030 struct ibmvnic_ind_xmit_queue *ind_bufp;
2031 struct ibmvnic_tx_buff *tx_buff;
2032 struct ibmvnic_tx_pool *tx_pool;
2033 union sub_crq tx_scrq_entry;
2034 int queue_num;
2035 int entries;
2036 int index;
2037 int i;
2038
2039 ind_bufp = &tx_scrq->ind_buf;
2040 entries = (u64)ind_bufp->index;
2041 queue_num = tx_scrq->pool_index;
2042
2043 for (i = entries - 1; i >= 0; --i) {
2044 tx_scrq_entry = ind_bufp->indir_arr[i];
2045 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
2046 continue;
2047 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
2048 if (index & IBMVNIC_TSO_POOL_MASK) {
2049 tx_pool = &adapter->tso_pool[queue_num];
2050 index &= ~IBMVNIC_TSO_POOL_MASK;
2051 } else {
2052 tx_pool = &adapter->tx_pool[queue_num];
2053 }
2054 tx_pool->free_map[tx_pool->consumer_index] = index;
2055 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2056 tx_pool->num_buffers - 1 :
2057 tx_pool->consumer_index - 1;
2058 tx_buff = &tx_pool->tx_buff[index];
2059 adapter->netdev->stats.tx_packets--;
2060 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
2061 adapter->tx_stats_buffers[queue_num].packets--;
2062 adapter->tx_stats_buffers[queue_num].bytes -=
2063 tx_buff->skb->len;
2064 dev_kfree_skb_any(tx_buff->skb);
2065 tx_buff->skb = NULL;
2066 adapter->netdev->stats.tx_dropped++;
2067 }
2068
2069 ind_bufp->index = 0;
2070
2071 if (atomic_sub_return(entries, &tx_scrq->used) <=
2072 (adapter->req_tx_entries_per_subcrq / 2) &&
2073 __netif_subqueue_stopped(adapter->netdev, queue_num)) {
2074 rcu_read_lock();
2075
2076 if (adapter->tx_queues_active) {
2077 netif_wake_subqueue(adapter->netdev, queue_num);
2078 netdev_dbg(adapter->netdev, "Started queue %d\n",
2079 queue_num);
2080 }
2081
2082 rcu_read_unlock();
2083 }
2084 }
2085
2086 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
2087 struct ibmvnic_sub_crq_queue *tx_scrq)
2088 {
2089 struct ibmvnic_ind_xmit_queue *ind_bufp;
2090 u64 dma_addr;
2091 u64 entries;
2092 u64 handle;
2093 int rc;
2094
2095 ind_bufp = &tx_scrq->ind_buf;
2096 dma_addr = (u64)ind_bufp->indir_dma;
2097 entries = (u64)ind_bufp->index;
2098 handle = tx_scrq->handle;
2099
2100 if (!entries)
2101 return 0;
2102 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
2103 if (rc)
2104 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
2105 else
2106 ind_bufp->index = 0;
2107 return 0;
2108 }
2109
2110 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2111 {
2112 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2113 int queue_num = skb_get_queue_mapping(skb);
2114 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
2115 struct device *dev = &adapter->vdev->dev;
2116 struct ibmvnic_ind_xmit_queue *ind_bufp;
2117 struct ibmvnic_tx_buff *tx_buff = NULL;
2118 struct ibmvnic_sub_crq_queue *tx_scrq;
2119 struct ibmvnic_long_term_buff *ltb;
2120 struct ibmvnic_tx_pool *tx_pool;
2121 unsigned int tx_send_failed = 0;
2122 netdev_tx_t ret = NETDEV_TX_OK;
2123 unsigned int tx_map_failed = 0;
2124 union sub_crq indir_arr[16];
2125 unsigned int tx_dropped = 0;
2126 unsigned int tx_packets = 0;
2127 unsigned int tx_bytes = 0;
2128 dma_addr_t data_dma_addr;
2129 struct netdev_queue *txq;
2130 unsigned long lpar_rc;
2131 union sub_crq tx_crq;
2132 unsigned int offset;
2133 int num_entries = 1;
2134 unsigned char *dst;
2135 int bufidx = 0;
2136 u8 proto = 0;
2137
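
/* Drop the skb if a reset has already marked the tx queues inactive;
 * otherwise stay inside this RCU read section so ibmvnic_cleanup()'s
 * synchronize_rcu() waits for the transmit to finish before the queues
 * are torn down.
 */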
2142 rcu_read_lock();
2143 if (!adapter->tx_queues_active) {
2144 dev_kfree_skb_any(skb);
2145
2146 tx_send_failed++;
2147 tx_dropped++;
2148 ret = NETDEV_TX_OK;
2149 goto out;
2150 }
2151
2152 tx_scrq = adapter->tx_scrq[queue_num];
2153 txq = netdev_get_tx_queue(netdev, queue_num);
2154 ind_bufp = &tx_scrq->ind_buf;
2155
2156 if (ibmvnic_xmit_workarounds(skb, netdev)) {
2157 tx_dropped++;
2158 tx_send_failed++;
2159 ret = NETDEV_TX_OK;
2160 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2161 goto out;
2162 }
2163
2164 if (skb_is_gso(skb))
2165 tx_pool = &adapter->tso_pool[queue_num];
2166 else
2167 tx_pool = &adapter->tx_pool[queue_num];
2168
2169 bufidx = tx_pool->free_map[tx_pool->consumer_index];
2170
2171 if (bufidx == IBMVNIC_INVALID_MAP) {
2172 dev_kfree_skb_any(skb);
2173 tx_send_failed++;
2174 tx_dropped++;
2175 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2176 ret = NETDEV_TX_OK;
2177 goto out;
2178 }
2179
2180 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
2181
2182 map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);
2183
2184 dst = ltb->buff + offset;
2185 memset(dst, 0, tx_pool->buf_size);
2186 data_dma_addr = ltb->addr + offset;
2187
2188 if (skb_shinfo(skb)->nr_frags) {
2189 int cur, i;
2190
2191
2192 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
2193 cur = skb_headlen(skb);
2194
2195
2196 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2197 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2198
2199 memcpy(dst + cur, skb_frag_address(frag),
2200 skb_frag_size(frag));
2201 cur += skb_frag_size(frag);
2202 }
2203 } else {
2204 skb_copy_from_linear_data(skb, dst, skb->len);
2205 }
2206
2207
2208 dma_wmb();
2209
2210 tx_pool->consumer_index =
2211 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
2212
2213 tx_buff = &tx_pool->tx_buff[bufidx];
2214 tx_buff->skb = skb;
2215 tx_buff->index = bufidx;
2216 tx_buff->pool_index = queue_num;
2217
2218 memset(&tx_crq, 0, sizeof(tx_crq));
2219 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
2220 tx_crq.v1.type = IBMVNIC_TX_DESC;
2221 tx_crq.v1.n_crq_elem = 1;
2222 tx_crq.v1.n_sge = 1;
2223 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
2224
2225 if (skb_is_gso(skb))
2226 tx_crq.v1.correlator =
2227 cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
2228 else
2229 tx_crq.v1.correlator = cpu_to_be32(bufidx);
2230 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
2231 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
2232 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
2233
2234 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
2235 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
2236 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
2237 }
2238
2239 if (skb->protocol == htons(ETH_P_IP)) {
2240 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
2241 proto = ip_hdr(skb)->protocol;
2242 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2243 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
2244 proto = ipv6_hdr(skb)->nexthdr;
2245 }
2246
2247 if (proto == IPPROTO_TCP)
2248 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
2249 else if (proto == IPPROTO_UDP)
2250 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
2251
2252 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2253 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
2254 hdrs += 2;
2255 }
2256 if (skb_is_gso(skb)) {
2257 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
2258 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
2259 hdrs += 2;
2260 }
2261
2262 if ((*hdrs >> 7) & 1)
2263 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
2264
2265 tx_crq.v1.n_crq_elem = num_entries;
2266 tx_buff->num_entries = num_entries;
2267
2268 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
2269 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2270 if (lpar_rc != H_SUCCESS)
2271 goto tx_flush_err;
2272 }
2273
2274 indir_arr[0] = tx_crq;
2275 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
2276 num_entries * sizeof(struct ibmvnic_generic_scrq));
2277 ind_bufp->index += num_entries;
2278 if (__netdev_tx_sent_queue(txq, skb->len,
2279 netdev_xmit_more() &&
2280 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
2281 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2282 if (lpar_rc != H_SUCCESS)
2283 goto tx_err;
2284 }
2285
2286 if (atomic_add_return(num_entries, &tx_scrq->used)
2287 >= adapter->req_tx_entries_per_subcrq) {
2288 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
2289 netif_stop_subqueue(netdev, queue_num);
2290 }
2291
2292 tx_packets++;
2293 tx_bytes += skb->len;
2294 txq_trans_cond_update(txq);
2295 ret = NETDEV_TX_OK;
2296 goto out;
2297
2298 tx_flush_err:
2299 dev_kfree_skb_any(skb);
2300 tx_buff->skb = NULL;
2301 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2302 tx_pool->num_buffers - 1 :
2303 tx_pool->consumer_index - 1;
2304 tx_dropped++;
2305 tx_err:
2306 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
2307 dev_err_ratelimited(dev, "tx: send failed\n");
2308
2309 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
2310 /* Disable TX and report carrier off if the queue is closed
2311 * or a failover is pending.
2312 * Firmware guarantees that a signal will be sent to the
2313 * driver, triggering a reset or some other action.
2314 */
2315 netif_tx_stop_all_queues(netdev);
2316 netif_carrier_off(netdev);
2317 }
2318 out:
2319 rcu_read_unlock();
2320 netdev->stats.tx_dropped += tx_dropped;
2321 netdev->stats.tx_bytes += tx_bytes;
2322 netdev->stats.tx_packets += tx_packets;
2323 adapter->tx_send_failed += tx_send_failed;
2324 adapter->tx_map_failed += tx_map_failed;
2325 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
2326 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
2327 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
2328
2329 return ret;
2330 }
2331
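/* ibmvnic_set_multi - sync the RX filter (.ndo_set_rx_mode).
 *
 * If IFF_PROMISC is requested but the firmware did not advertise promiscuous
 * support, nothing is changed. Otherwise program all-multicast, reject-all,
 * or the individual multicast list via MULTICAST_CTRL CRQs.
 */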
2332 static void ibmvnic_set_multi(struct net_device *netdev)
2333 {
2334 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2335 struct netdev_hw_addr *ha;
2336 union ibmvnic_crq crq;
2337
2338 memset(&crq, 0, sizeof(crq));
2339 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2340 crq.request_capability.cmd = REQUEST_CAPABILITY;
2341
2342 if (netdev->flags & IFF_PROMISC) {
2343 if (!adapter->promisc_supported)
2344 return;
2345 } else {
2346 if (netdev->flags & IFF_ALLMULTI) {
2347 /* Accept all multicast */
2348 memset(&crq, 0, sizeof(crq));
2349 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2350 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2351 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
2352 ibmvnic_send_crq(adapter, &crq);
2353 } else if (netdev_mc_empty(netdev)) {
2354 /* Reject all multicast */
2355 memset(&crq, 0, sizeof(crq));
2356 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2357 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2358 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
2359 ibmvnic_send_crq(adapter, &crq);
2360 } else {
2361 /* Accept one or more multicast(s) */
2362 netdev_for_each_mc_addr(ha, netdev) {
2363 memset(&crq, 0, sizeof(crq));
2364 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2365 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2366 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
2367 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
2368 ha->addr);
2369 ibmvnic_send_crq(adapter, &crq);
2370 }
2371 }
2372 }
2373 }
2374
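/* Send a CHANGE_MAC_ADDR CRQ for @dev_addr and wait, under fw_lock, for the
 * firmware completion. On any failure, resync adapter->mac_addr from the
 * netdev and return a negative errno.
 */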
2375 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
2376 {
2377 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2378 union ibmvnic_crq crq;
2379 int rc;
2380
2381 if (!is_valid_ether_addr(dev_addr)) {
2382 rc = -EADDRNOTAVAIL;
2383 goto err;
2384 }
2385
2386 memset(&crq, 0, sizeof(crq));
2387 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2388 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
2389 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
2390
2391 mutex_lock(&adapter->fw_lock);
2392 adapter->fw_done_rc = 0;
2393 reinit_completion(&adapter->fw_done);
2394
2395 rc = ibmvnic_send_crq(adapter, &crq);
2396 if (rc) {
2397 rc = -EIO;
2398 mutex_unlock(&adapter->fw_lock);
2399 goto err;
2400 }
2401
2402 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
2403
2404 if (rc || adapter->fw_done_rc) {
2405 rc = -EIO;
2406 mutex_unlock(&adapter->fw_lock);
2407 goto err;
2408 }
2409 mutex_unlock(&adapter->fw_lock);
2410 return 0;
2411 err:
2412 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2413 return rc;
2414 }
2415
2416 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2417 {
2418 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2419 struct sockaddr *addr = p;
2420 int rc;
2421
2422 rc = 0;
2423 if (!is_valid_ether_addr(addr->sa_data))
2424 return -EADDRNOTAVAIL;
2425
2426 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2427 if (adapter->state != VNIC_PROBED)
2428 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
2429
2430 return rc;
2431 }
2432
2433 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2434 {
2435 switch (reason) {
2436 case VNIC_RESET_FAILOVER:
2437 return "FAILOVER";
2438 case VNIC_RESET_MOBILITY:
2439 return "MOBILITY";
2440 case VNIC_RESET_FATAL:
2441 return "FATAL";
2442 case VNIC_RESET_NON_FATAL:
2443 return "NON_FATAL";
2444 case VNIC_RESET_TIMEOUT:
2445 return "TIMEOUT";
2446 case VNIC_RESET_CHANGE_PARAM:
2447 return "CHANGE_PARAM";
2448 case VNIC_RESET_PASSIVE_INIT:
2449 return "PASSIVE_INIT";
2450 }
2451 return "UNKNOWN";
2452 }
2453
2454 /* Reinitialize the init_done completion and its return code before
2455 * (re)registering the CRQ. A transport event can arrive as soon as the
2456 * CRQ is registered and is reported through these fields, so they must
2457 * be reset first to avoid missing the notification or error.
2458 */
2459
2460
2461 static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
2462 {
2463 reinit_completion(&adapter->init_done);
2464 adapter->init_done_rc = 0;
2465 }
2466
2467 /* do_reset returns 0 if we are able to keep processing reset events, or
2468 * non-zero if we hit a fatal error and must halt.
2469 */
2470
2471 static int do_reset(struct ibmvnic_adapter *adapter,
2472 struct ibmvnic_rwi *rwi, u32 reset_state)
2473 {
2474 struct net_device *netdev = adapter->netdev;
2475 u64 old_num_rx_queues, old_num_tx_queues;
2476 u64 old_num_rx_slots, old_num_tx_slots;
2477 int rc;
2478
2479 netdev_dbg(adapter->netdev,
2480 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2481 adapter_state_to_string(adapter->state),
2482 adapter->failover_pending,
2483 reset_reason_to_string(rwi->reset_reason),
2484 adapter_state_to_string(reset_state));
2485
2486 adapter->reset_reason = rwi->reset_reason;
2487
2488 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2489 rtnl_lock();
2490
2491 /* Now that we have the rtnl lock, clear any pending failover.
2492 * This will ensure ibmvnic_open() has either completed or will
2493 * block until the failover is complete.
2494 */
2495 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2496 adapter->failover_pending = false;
2497
2498 /* read the state and check (again) after getting rtnl */
2499 reset_state = adapter->state;
2500
2501 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2502 rc = -EBUSY;
2503 goto out;
2504 }
2505
2506 netif_carrier_off(netdev);
2507
2508 old_num_rx_queues = adapter->req_rx_queues;
2509 old_num_tx_queues = adapter->req_tx_queues;
2510 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2511 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2512
2513 ibmvnic_cleanup(netdev);
2514
2515 if (reset_state == VNIC_OPEN &&
2516 adapter->reset_reason != VNIC_RESET_MOBILITY &&
2517 adapter->reset_reason != VNIC_RESET_FAILOVER) {
2518 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2519 rc = __ibmvnic_close(netdev);
2520 if (rc)
2521 goto out;
2522 } else {
2523 adapter->state = VNIC_CLOSING;
2524
2525 /* Release the RTNL lock before the link state change and
2526 * re-acquire it afterwards, so that linkwatch_event can
2527 * grab the RTNL lock and run during a reset.
2528 */
2529
2530 rtnl_unlock();
2531 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2532 rtnl_lock();
2533 if (rc)
2534 goto out;
2535
2536 if (adapter->state == VNIC_OPEN) {
2537 /* When we dropped the rtnl lock, ibmvnic_open() acquired it,
2538 * saw that we are resetting and set the adapter state back to
2539 * VNIC_OPEN. Update the "target" reset_state so the adapter is
2540 * brought back to the open state after the reset, and resume
2541 * from VNIC_CLOSING.
2542 */
2543 netdev_dbg(netdev,
2544 "Open changed state from %s, updating.\n",
2545 adapter_state_to_string(reset_state));
2546 reset_state = VNIC_OPEN;
2547 adapter->state = VNIC_CLOSING;
2548 }
2549
2550 if (adapter->state != VNIC_CLOSING) {
2551 /* If someone else changed the adapter state
2552 * while we dropped the rtnl lock, fail the reset.
2553 */
2554 rc = -EAGAIN;
2555 goto out;
2556 }
2557 adapter->state = VNIC_CLOSED;
2558 }
2559 }
2560
2561 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2562 release_resources(adapter);
2563 release_sub_crqs(adapter, 1);
2564 release_crq_queue(adapter);
2565 }
2566
2567 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2568 /* remove the closed state so when we call open it appears
2569 * we are not resetting the current adapter
2570 */
2571 adapter->state = VNIC_PROBED;
2572
2573 reinit_init_done(adapter);
2574
2575 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2576 rc = init_crq_queue(adapter);
2577 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2578 rc = ibmvnic_reenable_crq_queue(adapter);
2579 release_sub_crqs(adapter, 1);
2580 } else {
2581 rc = ibmvnic_reset_crq(adapter);
2582 if (rc == H_CLOSED || rc == H_SUCCESS) {
2583 rc = vio_enable_interrupts(adapter->vdev);
2584 if (rc)
2585 netdev_err(adapter->netdev,
2586 "Reset failed to enable interrupts. rc=%d\n",
2587 rc);
2588 }
2589 }
2590
2591 if (rc) {
2592 netdev_err(adapter->netdev,
2593 "Reset couldn't initialize crq. rc=%d\n", rc);
2594 goto out;
2595 }
2596
2597 rc = ibmvnic_reset_init(adapter, true);
2598 if (rc)
2599 goto out;
2600
2601 /* If the adapter was in PROBE or DOWN state prior to the reset,
2602 * exit here.
2603 */
2604 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
2605 rc = 0;
2606 goto out;
2607 }
2608
2609 rc = ibmvnic_login(netdev);
2610 if (rc)
2611 goto out;
2612
2613 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2614 rc = init_resources(adapter);
2615 if (rc)
2616 goto out;
2617 } else if (adapter->req_rx_queues != old_num_rx_queues ||
2618 adapter->req_tx_queues != old_num_tx_queues ||
2619 adapter->req_rx_add_entries_per_subcrq !=
2620 old_num_rx_slots ||
2621 adapter->req_tx_entries_per_subcrq !=
2622 old_num_tx_slots ||
2623 !adapter->rx_pool ||
2624 !adapter->tso_pool ||
2625 !adapter->tx_pool) {
2626 release_napi(adapter);
2627 release_vpd_data(adapter);
2628
2629 rc = init_resources(adapter);
2630 if (rc)
2631 goto out;
2632
2633 } else {
2634 rc = init_tx_pools(netdev);
2635 if (rc) {
2636 netdev_dbg(netdev,
2637 "init tx pools failed (%d)\n",
2638 rc);
2639 goto out;
2640 }
2641
2642 rc = init_rx_pools(netdev);
2643 if (rc) {
2644 netdev_dbg(netdev,
2645 "init rx pools failed (%d)\n",
2646 rc);
2647 goto out;
2648 }
2649 }
2650 ibmvnic_disable_irqs(adapter);
2651 }
2652 adapter->state = VNIC_CLOSED;
2653
2654 if (reset_state == VNIC_CLOSED) {
2655 rc = 0;
2656 goto out;
2657 }
2658
2659 rc = __ibmvnic_open(netdev);
2660 if (rc) {
2661 rc = IBMVNIC_OPEN_FAILED;
2662 goto out;
2663 }
2664
2665 /* refresh device's multicast list */
2666 ibmvnic_set_multi(netdev);
2667
2668 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2669 adapter->reset_reason == VNIC_RESET_MOBILITY)
2670 __netdev_notify_peers(netdev);
2671
2672 rc = 0;
2673
2674 out:
2675 /* restore the adapter state if the reset failed */
2676 if (rc)
2677 adapter->state = reset_state;
2678
2679 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2680 rtnl_unlock();
2681
2682 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2683 adapter_state_to_string(adapter->state),
2684 adapter->failover_pending, rc);
2685 return rc;
2686 }
2687
2688 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2689 struct ibmvnic_rwi *rwi, u32 reset_state)
2690 {
2691 struct net_device *netdev = adapter->netdev;
2692 int rc;
2693
2694 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2695 reset_reason_to_string(rwi->reset_reason));
2696
2697 /* read the state and check (again) after getting rtnl */
2698 reset_state = adapter->state;
2699
2700 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2701 rc = -EBUSY;
2702 goto out;
2703 }
2704
2705 netif_carrier_off(netdev);
2706 adapter->reset_reason = rwi->reset_reason;
2707
2708 ibmvnic_cleanup(netdev);
2709 release_resources(adapter);
2710 release_sub_crqs(adapter, 0);
2711 release_crq_queue(adapter);
2712
2713 /* remove the closed state so when we call open it appears
2714 * we are not resetting the current adapter
2715 */
2716 adapter->state = VNIC_PROBED;
2717
2718 reinit_init_done(adapter);
2719
2720 rc = init_crq_queue(adapter);
2721 if (rc) {
2722 netdev_err(adapter->netdev,
2723 "Couldn't initialize crq. rc=%d\n", rc);
2724 goto out;
2725 }
2726
2727 rc = ibmvnic_reset_init(adapter, false);
2728 if (rc)
2729 goto out;
2730
2731 /* If the adapter was in PROBE or DOWN state prior to the reset,
2732 * exit here.
2733 */
2734 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
2735 goto out;
2736
2737 rc = ibmvnic_login(netdev);
2738 if (rc)
2739 goto out;
2740
2741 rc = init_resources(adapter);
2742 if (rc)
2743 goto out;
2744
2745 ibmvnic_disable_irqs(adapter);
2746 adapter->state = VNIC_CLOSED;
2747
2748 if (reset_state == VNIC_CLOSED)
2749 goto out;
2750
2751 rc = __ibmvnic_open(netdev);
2752 if (rc) {
2753 rc = IBMVNIC_OPEN_FAILED;
2754 goto out;
2755 }
2756
2757 __netdev_notify_peers(netdev);
2758 out:
2759 /* restore the adapter state if the reset failed */
2760 if (rc)
2761 adapter->state = reset_state;
2762 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2763 adapter_state_to_string(adapter->state),
2764 adapter->failover_pending, rc);
2765 return rc;
2766 }
2767
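/* Pop the next reset work item off the adapter's rwi_list, or return NULL
 * if the queue is empty. The list is protected by rwi_lock.
 */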
2768 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2769 {
2770 struct ibmvnic_rwi *rwi;
2771 unsigned long flags;
2772
2773 spin_lock_irqsave(&adapter->rwi_lock, flags);
2774
2775 if (!list_empty(&adapter->rwi_list)) {
2776 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2777 list);
2778 list_del(&rwi->list);
2779 } else {
2780 rwi = NULL;
2781 }
2782
2783 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2784 return rwi;
2785 }
2786
2787 /**
2788 * do_passive_init - complete probing when a partner device is detected.
2789 * @adapter: ibmvnic_adapter struct
2790 *
2791 * If the ibmvnic device does not have a partner device to communicate with
2792 * at boot and that partner comes online later, this function completes the
2793 * initialization of the ibmvnic device.
2794 * Caller is expected to hold rtnl_lock().
2795 *
2796 * Returns 0 on success, leaving the adapter in the VNIC_PROBED state waiting
2797 * for a signal from the partner device. Returns non-zero if initialization
2798 * fails or times out, leaving the adapter in the VNIC_DOWN state.
2799 */
2800
2801 static int do_passive_init(struct ibmvnic_adapter *adapter)
2802 {
2803 unsigned long timeout = msecs_to_jiffies(30000);
2804 struct net_device *netdev = adapter->netdev;
2805 struct device *dev = &adapter->vdev->dev;
2806 int rc;
2807
2808 netdev_dbg(netdev, "Partner device found, probing.\n");
2809
2810 adapter->state = VNIC_PROBING;
2811 reinit_completion(&adapter->init_done);
2812 adapter->init_done_rc = 0;
2813 adapter->crq.active = true;
2814
2815 rc = send_crq_init_complete(adapter);
2816 if (rc)
2817 goto out;
2818
2819 rc = send_version_xchg(adapter);
2820 if (rc)
2821 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2822
2823 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2824 dev_err(dev, "Initialization sequence timed out\n");
2825 rc = -ETIMEDOUT;
2826 goto out;
2827 }
2828
2829 rc = init_sub_crqs(adapter);
2830 if (rc) {
2831 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2832 goto out;
2833 }
2834
2835 rc = init_sub_crq_irqs(adapter);
2836 if (rc) {
2837 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
2838 goto init_failed;
2839 }
2840
2841 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2842 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2843 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2844
2845 adapter->state = VNIC_PROBED;
2846 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2847
2848 return 0;
2849
2850 init_failed:
2851 release_sub_crqs(adapter, 1);
2852 out:
2853 adapter->state = VNIC_DOWN;
2854 return rc;
2855 }
2856
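/* Reset worker: waits for a pending probe to finish, claims the ->resetting
 * bit if there is work queued, then drains the reset queue, dispatching each
 * work item to do_passive_init(), do_hard_reset() or do_reset().
 */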
2857 static void __ibmvnic_reset(struct work_struct *work)
2858 {
2859 struct ibmvnic_adapter *adapter;
2860 unsigned int timeout = 5000;
2861 struct ibmvnic_rwi *tmprwi;
2862 bool saved_state = false;
2863 struct ibmvnic_rwi *rwi;
2864 unsigned long flags;
2865 struct device *dev;
2866 bool need_reset;
2867 int num_fails = 0;
2868 u32 reset_state;
2869 int rc = 0;
2870
2871 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2872 dev = &adapter->vdev->dev;
2873
2874 /* Wait for ibmvnic_probe() to complete before processing resets. If
2875 * the probe is taking too long, requeue this work as delayed work and
2876 * try again later.
2877 *
2878 * Three possibilities here:
2879 * 1. Adapter is being removed - just return.
2880 * 2. Timed out waiting on probe - requeue as delayed work.
2881 * 3. Probe completed - process any queued resets.
2882 */
2883 if (adapter->state == VNIC_PROBING &&
2884 !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
2885 dev_err(dev, "Reset thread timed out on probe");
2886 queue_delayed_work(system_long_wq,
2887 &adapter->ibmvnic_delayed_reset,
2888 IBMVNIC_RESET_DELAY);
2889 return;
2890 }
2891
2892 /* adapter is done with probe (i.e. state is never VNIC_PROBING now) */
2893 if (adapter->state == VNIC_REMOVING)
2894 return;
2895
2896 /* ->rwi_list is stable now (no one else is removing entries) */
2897
2898 /* The reset queue may have been flushed after this work was
2899 * scheduled, so there may be nothing to process. Only claim the
2900 * ->resetting bit if there is actually a reset queued. Otherwise we
2901 * can race with ibmvnic_open(), which checks the ->resetting bit
2902 * and, when it is set, assumes this worker will bring the device up:
2903 *
2904 * __ibmvnic_reset()          ibmvnic_open()
2905 * -----------------          --------------
2906 * set ->resetting bit
2907 *                            find ->resetting bit set
2908 *                            set ->state to VNIC_OPEN (assume the
2909 *                            reset will open the device)
2910 *                            return
2911 * find reset queue empty
2912 * return
2913 *
2914 * Neither path performs the vnic login/open and the vnic stays
2915 * down.
2916 *
2917 * By checking the list and setting the bit under rwi_lock, either
2918 * this worker or ibmvnic_open() completes the open.
2919 */
2920 need_reset = false;
2921 spin_lock(&adapter->rwi_lock);
2922 if (!list_empty(&adapter->rwi_list)) {
2923 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2924 queue_delayed_work(system_long_wq,
2925 &adapter->ibmvnic_delayed_reset,
2926 IBMVNIC_RESET_DELAY);
2927 } else {
2928 need_reset = true;
2929 }
2930 }
2931 spin_unlock(&adapter->rwi_lock);
2932
2933 if (!need_reset)
2934 return;
2935
2936 rwi = get_next_rwi(adapter);
2937 while (rwi) {
2938 spin_lock_irqsave(&adapter->state_lock, flags);
2939
2940 if (adapter->state == VNIC_REMOVING ||
2941 adapter->state == VNIC_REMOVED) {
2942 spin_unlock_irqrestore(&adapter->state_lock, flags);
2943 kfree(rwi);
2944 rc = EBUSY;
2945 break;
2946 }
2947
2948 if (!saved_state) {
2949 reset_state = adapter->state;
2950 saved_state = true;
2951 }
2952 spin_unlock_irqrestore(&adapter->state_lock, flags);
2953
2954 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2955 rtnl_lock();
2956 rc = do_passive_init(adapter);
2957 rtnl_unlock();
2958 if (!rc)
2959 netif_carrier_on(adapter->netdev);
2960 } else if (adapter->force_reset_recovery) {
2961 /* Since we are doing a hard reset now, clear the
2962 * failover_pending flag so we don't ignore any
2963 * future MOBILITY or other resets.
2964 */
2965 adapter->failover_pending = false;
2966
2967 /* Transport event occurred during the previous reset */
2968 if (adapter->wait_for_reset) {
2969 /* Previous reset was CHANGE_PARAM; rtnl is already held by the caller */
2970 adapter->force_reset_recovery = false;
2971 rc = do_hard_reset(adapter, rwi, reset_state);
2972 } else {
2973 rtnl_lock();
2974 adapter->force_reset_recovery = false;
2975 rc = do_hard_reset(adapter, rwi, reset_state);
2976 rtnl_unlock();
2977 }
2978 if (rc)
2979 num_fails++;
2980 else
2981 num_fails = 0;
2982
2983 /* If auto-priority-failover is enabled we can get
2984 * back to back failovers during resets, causing
2985 * hard resets to fail (e.g. no login response from
2986 * VIOS). To avoid DoS'ing ourselves, wait a while
2987 * before trying the next reset.
2988 */
2989
2990 if (num_fails >= 3) {
2991 netdev_dbg(adapter->netdev,
2992 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
2993 adapter_state_to_string(adapter->state),
2994 num_fails);
2995 set_current_state(TASK_UNINTERRUPTIBLE);
2996 schedule_timeout(60 * HZ);
2997 }
2998 } else {
2999 rc = do_reset(adapter, rwi, reset_state);
3000 }
3001 tmprwi = rwi;
3002 adapter->last_reset_time = jiffies;
3003
3004 if (rc)
3005 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
3006
3007 rwi = get_next_rwi(adapter);
3008
3009 /* If there are no resets queued and the previous reset failed, the
3010 * adapter would be in an undefined state, so retry the previous
3011 * reset as a hard reset.
3012 *
3013 * Otherwise, free the previous rwi and, if another reset is queued,
3014 * process the new reset even if the previous one failed (the
3015 * previous reset could have failed because of a failover, for
3016 * instance, so process the failover).
3017 */
3018
3019 if (rwi)
3020 kfree(tmprwi);
3021 else if (rc)
3022 rwi = tmprwi;
3023
3024 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
3025 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
3026 adapter->force_reset_recovery = true;
3027 }
3028
3029 if (adapter->wait_for_reset) {
3030 adapter->reset_done_rc = rc;
3031 complete(&adapter->reset_done);
3032 }
3033
3034 clear_bit_unlock(0, &adapter->resetting);
3035
3036 netdev_dbg(adapter->netdev,
3037 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
3038 adapter_state_to_string(adapter->state),
3039 adapter->force_reset_recovery,
3040 adapter->wait_for_reset);
3041 }
3042
3043 static void __ibmvnic_delayed_reset(struct work_struct *work)
3044 {
3045 struct ibmvnic_adapter *adapter;
3046
3047 adapter = container_of(work, struct ibmvnic_adapter,
3048 ibmvnic_delayed_reset.work);
3049 __ibmvnic_reset(&adapter->ibmvnic_reset);
3050 }
3051
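/* Free every reset work item still sitting on the rwi_list. */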
3052 static void flush_reset_queue(struct ibmvnic_adapter *adapter)
3053 {
3054 struct list_head *entry, *tmp_entry;
3055
3056 if (!list_empty(&adapter->rwi_list)) {
3057 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
3058 list_del(entry);
3059 kfree(list_entry(entry, struct ibmvnic_rwi, list));
3060 }
3061 }
3062 }
3063
3064 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
3065 enum ibmvnic_reset_reason reason)
3066 {
3067 struct net_device *netdev = adapter->netdev;
3068 struct ibmvnic_rwi *rwi, *tmp;
3069 unsigned long flags;
3070 int ret;
3071
3072 spin_lock_irqsave(&adapter->rwi_lock, flags);
3073
3074 /* If a failover is pending, don't schedule any other reset; let the
3075 * failover complete instead. If a failover reset is already
3076 * queued, the duplicate is detected and dropped when walking the
3077 * rwi_list below.
3078 */
3079 if (adapter->state == VNIC_REMOVING ||
3080 adapter->state == VNIC_REMOVED ||
3081 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
3082 ret = EBUSY;
3083 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
3084 goto err;
3085 }
3086
3087 list_for_each_entry(tmp, &adapter->rwi_list, list) {
3088 if (tmp->reset_reason == reason) {
3089 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
3090 reset_reason_to_string(reason));
3091 ret = EBUSY;
3092 goto err;
3093 }
3094 }
3095
3096 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
3097 if (!rwi) {
3098 ret = ENOMEM;
3099 goto err;
3100 }
3101
3102 /* If we just received a transport event, flush the reset
3103 * queue and process this reset */
3104 if (adapter->force_reset_recovery)
3105 flush_reset_queue(adapter);
3106
3107 rwi->reset_reason = reason;
3108 list_add_tail(&rwi->list, &adapter->rwi_list);
3109 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
3110 reset_reason_to_string(reason));
3111 queue_work(system_long_wq, &adapter->ibmvnic_reset);
3112
3113 ret = 0;
3114 err:
3115 /* ibmvnic_close() below can block, so drop the lock first */
3116 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
3117
3118 if (ret == ENOMEM)
3119 ibmvnic_close(netdev);
3120
3121 return -ret;
3122 }
3123
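/* .ndo_tx_timeout handler: schedule a TIMEOUT reset unless a reset is
 * already in progress or the previous reset was within the watchdog
 * interval.
 */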
3124 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
3125 {
3126 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3127
3128 if (test_bit(0, &adapter->resetting)) {
3129 netdev_err(adapter->netdev,
3130 "Adapter is resetting, skip timeout reset\n");
3131 return;
3132 }
3133
3134 /* Don't queue a reset until at least 5 seconds (default watchdog
3135 * value) after the last reset */
3136 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
3137 netdev_dbg(dev, "Not yet time to tx timeout.\n");
3138 return;
3139 }
3140 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
3141 }
3142
3143 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
3144 struct ibmvnic_rx_buff *rx_buff)
3145 {
3146 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
3147
3148 rx_buff->skb = NULL;
3149
3150 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
3151 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
3152
3153 atomic_dec(&pool->available);
3154 }
3155
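/* NAPI poll: drain completed RX descriptors from this sub-CRQ, copy the
 * frame data out of the long term buffer into the skb, hand it to the stack
 * via GRO, and replenish the RX pool. The queue interrupt is re-enabled once
 * fewer frames than the budget were processed.
 */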
3156 static int ibmvnic_poll(struct napi_struct *napi, int budget)
3157 {
3158 struct ibmvnic_sub_crq_queue *rx_scrq;
3159 struct ibmvnic_adapter *adapter;
3160 struct net_device *netdev;
3161 int frames_processed;
3162 int scrq_num;
3163
3164 netdev = napi->dev;
3165 adapter = netdev_priv(netdev);
3166 scrq_num = (int)(napi - adapter->napi);
3167 frames_processed = 0;
3168 rx_scrq = adapter->rx_scrq[scrq_num];
3169
3170 restart_poll:
3171 while (frames_processed < budget) {
3172 struct sk_buff *skb;
3173 struct ibmvnic_rx_buff *rx_buff;
3174 union sub_crq *next;
3175 u32 length;
3176 u16 offset;
3177 u8 flags = 0;
3178
3179 if (unlikely(test_bit(0, &adapter->resetting) &&
3180 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
3181 enable_scrq_irq(adapter, rx_scrq);
3182 napi_complete_done(napi, frames_processed);
3183 return frames_processed;
3184 }
3185
3186 if (!pending_scrq(adapter, rx_scrq))
3187 break;
3188 next = ibmvnic_next_scrq(adapter, rx_scrq);
3189 rx_buff = (struct ibmvnic_rx_buff *)
3190 be64_to_cpu(next->rx_comp.correlator);
3191
3192 if (next->rx_comp.rc) {
3193 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
3194 be16_to_cpu(next->rx_comp.rc));
3195
3196 next->rx_comp.first = 0;
3197 dev_kfree_skb_any(rx_buff->skb);
3198 remove_buff_from_pool(adapter, rx_buff);
3199 continue;
3200 } else if (!rx_buff->skb) {
3201 /* free the entry */
3202 next->rx_comp.first = 0;
3203 remove_buff_from_pool(adapter, rx_buff);
3204 continue;
3205 }
3206
3207 length = be32_to_cpu(next->rx_comp.len);
3208 offset = be16_to_cpu(next->rx_comp.off_frame_data);
3209 flags = next->rx_comp.flags;
3210 skb = rx_buff->skb;
3211
3212 dma_rmb();
3213 skb_copy_to_linear_data(skb, rx_buff->data + offset,
3214 length);
3215
3216 /* The VLAN header has been stripped by the system firmware and
3217 * needs to be re-inserted by the driver
3218 */
3219 if (adapter->rx_vlan_header_insertion &&
3220 (flags & IBMVNIC_VLAN_STRIPPED))
3221 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3222 ntohs(next->rx_comp.vlan_tci));
3223
3224 /* free the entry */
3225 next->rx_comp.first = 0;
3226 remove_buff_from_pool(adapter, rx_buff);
3227
3228 skb_put(skb, length);
3229 skb->protocol = eth_type_trans(skb, netdev);
3230 skb_record_rx_queue(skb, scrq_num);
3231
3232 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
3233 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
3234 skb->ip_summed = CHECKSUM_UNNECESSARY;
3235 }
3236
3237 length = skb->len;
3238 napi_gro_receive(napi, skb);
3239 netdev->stats.rx_packets++;
3240 netdev->stats.rx_bytes += length;
3241 adapter->rx_stats_buffers[scrq_num].packets++;
3242 adapter->rx_stats_buffers[scrq_num].bytes += length;
3243 frames_processed++;
3244 }
3245
3246 if (adapter->state != VNIC_CLOSING &&
3247 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
3248 adapter->req_rx_add_entries_per_subcrq / 2) ||
3249 frames_processed < budget))
3250 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
3251 if (frames_processed < budget) {
3252 if (napi_complete_done(napi, frames_processed)) {
3253 enable_scrq_irq(adapter, rx_scrq);
3254 if (pending_scrq(adapter, rx_scrq)) {
3255 if (napi_reschedule(napi)) {
3256 disable_scrq_irq(adapter, rx_scrq);
3257 goto restart_poll;
3258 }
3259 }
3260 }
3261 }
3262 return frames_processed;
3263 }
3264
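/* Request a CHANGE_PARAM reset and wait for it to complete. The current
 * settings are saved as a fallback first; if the reset fails, the fallback
 * values are restored and the reset is retried once with them.
 */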
3265 static int wait_for_reset(struct ibmvnic_adapter *adapter)
3266 {
3267 int rc, ret;
3268
3269 adapter->fallback.mtu = adapter->req_mtu;
3270 adapter->fallback.rx_queues = adapter->req_rx_queues;
3271 adapter->fallback.tx_queues = adapter->req_tx_queues;
3272 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
3273 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
3274
3275 reinit_completion(&adapter->reset_done);
3276 adapter->wait_for_reset = true;
3277 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3278
3279 if (rc) {
3280 ret = rc;
3281 goto out;
3282 }
3283 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
3284 if (rc) {
3285 ret = -ENODEV;
3286 goto out;
3287 }
3288
3289 ret = 0;
3290 if (adapter->reset_done_rc) {
3291 ret = -EIO;
3292 adapter->desired.mtu = adapter->fallback.mtu;
3293 adapter->desired.rx_queues = adapter->fallback.rx_queues;
3294 adapter->desired.tx_queues = adapter->fallback.tx_queues;
3295 adapter->desired.rx_entries = adapter->fallback.rx_entries;
3296 adapter->desired.tx_entries = adapter->fallback.tx_entries;
3297
3298 reinit_completion(&adapter->reset_done);
3299 adapter->wait_for_reset = true;
3300 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
3301 if (rc) {
3302 ret = rc;
3303 goto out;
3304 }
3305 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
3306 60000);
3307 if (rc) {
3308 ret = -ENODEV;
3309 goto out;
3310 }
3311 }
3312 out:
3313 adapter->wait_for_reset = false;
3314
3315 return ret;
3316 }
3317
3318 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
3319 {
3320 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3321
3322 adapter->desired.mtu = new_mtu + ETH_HLEN;
3323
3324 return wait_for_reset(adapter);
3325 }
3326
3327 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3328 struct net_device *dev,
3329 netdev_features_t features)
3330 {
3331 /* Some backing hardware adapters can not
3332 * handle packets with an MSS less than 224
3333 * or with only one segment.
3334 */
3335 if (skb_is_gso(skb)) {
3336 if (skb_shinfo(skb)->gso_size < 224 ||
3337 skb_shinfo(skb)->gso_segs == 1)
3338 features &= ~NETIF_F_GSO_MASK;
3339 }
3340
3341 return features;
3342 }
3343
3344 static const struct net_device_ops ibmvnic_netdev_ops = {
3345 .ndo_open = ibmvnic_open,
3346 .ndo_stop = ibmvnic_close,
3347 .ndo_start_xmit = ibmvnic_xmit,
3348 .ndo_set_rx_mode = ibmvnic_set_multi,
3349 .ndo_set_mac_address = ibmvnic_set_mac,
3350 .ndo_validate_addr = eth_validate_addr,
3351 .ndo_tx_timeout = ibmvnic_tx_timeout,
3352 .ndo_change_mtu = ibmvnic_change_mtu,
3353 .ndo_features_check = ibmvnic_features_check,
3354 };
3355
3356 /* ethtool functions */
3357
3358 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3359 struct ethtool_link_ksettings *cmd)
3360 {
3361 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3362 int rc;
3363
3364 rc = send_query_phys_parms(adapter);
3365 if (rc) {
3366 adapter->speed = SPEED_UNKNOWN;
3367 adapter->duplex = DUPLEX_UNKNOWN;
3368 }
3369 cmd->base.speed = adapter->speed;
3370 cmd->base.duplex = adapter->duplex;
3371 cmd->base.port = PORT_FIBRE;
3372 cmd->base.phy_address = 0;
3373 cmd->base.autoneg = AUTONEG_ENABLE;
3374
3375 return 0;
3376 }
3377
3378 static void ibmvnic_get_drvinfo(struct net_device *netdev,
3379 struct ethtool_drvinfo *info)
3380 {
3381 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3382
3383 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3384 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3385 strscpy(info->fw_version, adapter->fw_version,
3386 sizeof(info->fw_version));
3387 }
3388
3389 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3390 {
3391 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3392
3393 return adapter->msg_enable;
3394 }
3395
3396 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3397 {
3398 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3399
3400 adapter->msg_enable = data;
3401 }
3402
3403 static u32 ibmvnic_get_link(struct net_device *netdev)
3404 {
3405 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3406
3407 /* No need to send a query: we request a logical link up at
3408 * init and then wait for link state indications
3409 */
3410 return adapter->logical_link_state;
3411 }
3412
3413 static void ibmvnic_get_ringparam(struct net_device *netdev,
3414 struct ethtool_ringparam *ring,
3415 struct kernel_ethtool_ringparam *kernel_ring,
3416 struct netlink_ext_ack *extack)
3417 {
3418 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3419
3420 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3421 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3422 ring->rx_mini_max_pending = 0;
3423 ring->rx_jumbo_max_pending = 0;
3424 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3425 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3426 ring->rx_mini_pending = 0;
3427 ring->rx_jumbo_pending = 0;
3428 }
3429
3430 static int ibmvnic_set_ringparam(struct net_device *netdev,
3431 struct ethtool_ringparam *ring,
3432 struct kernel_ethtool_ringparam *kernel_ring,
3433 struct netlink_ext_ack *extack)
3434 {
3435 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3436
3437 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
3438 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
3439 netdev_err(netdev, "Invalid request.\n");
3440 netdev_err(netdev, "Max tx buffers = %llu\n",
3441 adapter->max_tx_entries_per_subcrq);
3442 netdev_err(netdev, "Max rx buffers = %llu\n",
3443 adapter->max_rx_add_entries_per_subcrq);
3444 return -EINVAL;
3445 }
3446
3447 adapter->desired.rx_entries = ring->rx_pending;
3448 adapter->desired.tx_entries = ring->tx_pending;
3449
3450 return wait_for_reset(adapter);
3451 }
3452
3453 static void ibmvnic_get_channels(struct net_device *netdev,
3454 struct ethtool_channels *channels)
3455 {
3456 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3457
3458 channels->max_rx = adapter->max_rx_queues;
3459 channels->max_tx = adapter->max_tx_queues;
3460 channels->max_other = 0;
3461 channels->max_combined = 0;
3462 channels->rx_count = adapter->req_rx_queues;
3463 channels->tx_count = adapter->req_tx_queues;
3464 channels->other_count = 0;
3465 channels->combined_count = 0;
3466 }
3467
3468 static int ibmvnic_set_channels(struct net_device *netdev,
3469 struct ethtool_channels *channels)
3470 {
3471 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3472
3473 adapter->desired.rx_queues = channels->rx_count;
3474 adapter->desired.tx_queues = channels->tx_count;
3475
3476 return wait_for_reset(adapter);
3477 }
3478
3479 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3480 {
3481 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3482 int i;
3483
3484 if (stringset != ETH_SS_STATS)
3485 return;
3486
3487 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
3488 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3489
3490 for (i = 0; i < adapter->req_tx_queues; i++) {
3491 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3492 data += ETH_GSTRING_LEN;
3493
3494 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3495 data += ETH_GSTRING_LEN;
3496
3497 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
3498 data += ETH_GSTRING_LEN;
3499 }
3500
3501 for (i = 0; i < adapter->req_rx_queues; i++) {
3502 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3503 data += ETH_GSTRING_LEN;
3504
3505 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3506 data += ETH_GSTRING_LEN;
3507
3508 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3509 data += ETH_GSTRING_LEN;
3510 }
3511 }
3512
3513 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3514 {
3515 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3516
3517 switch (sset) {
3518 case ETH_SS_STATS:
3519 return ARRAY_SIZE(ibmvnic_stats) +
3520 adapter->req_tx_queues * NUM_TX_STATS +
3521 adapter->req_rx_queues * NUM_RX_STATS;
3522 default:
3523 return -EOPNOTSUPP;
3524 }
3525 }
3526
3527 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3528 struct ethtool_stats *stats, u64 *data)
3529 {
3530 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3531 union ibmvnic_crq crq;
3532 int i, j;
3533 int rc;
3534
3535 memset(&crq, 0, sizeof(crq));
3536 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3537 crq.request_statistics.cmd = REQUEST_STATISTICS;
3538 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3539 crq.request_statistics.len =
3540 cpu_to_be32(sizeof(struct ibmvnic_statistics));
3541
3542
3543 reinit_completion(&adapter->stats_done);
3544 rc = ibmvnic_send_crq(adapter, &crq);
3545 if (rc)
3546 return;
3547 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3548 if (rc)
3549 return;
3550
3551 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3552 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3553 (adapter, ibmvnic_stats[i].offset));
3554
3555 for (j = 0; j < adapter->req_tx_queues; j++) {
3556 data[i] = adapter->tx_stats_buffers[j].packets;
3557 i++;
3558 data[i] = adapter->tx_stats_buffers[j].bytes;
3559 i++;
3560 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3561 i++;
3562 }
3563
3564 for (j = 0; j < adapter->req_rx_queues; j++) {
3565 data[i] = adapter->rx_stats_buffers[j].packets;
3566 i++;
3567 data[i] = adapter->rx_stats_buffers[j].bytes;
3568 i++;
3569 data[i] = adapter->rx_stats_buffers[j].interrupts;
3570 i++;
3571 }
3572 }
3573
3574 static const struct ethtool_ops ibmvnic_ethtool_ops = {
3575 .get_drvinfo = ibmvnic_get_drvinfo,
3576 .get_msglevel = ibmvnic_get_msglevel,
3577 .set_msglevel = ibmvnic_set_msglevel,
3578 .get_link = ibmvnic_get_link,
3579 .get_ringparam = ibmvnic_get_ringparam,
3580 .set_ringparam = ibmvnic_set_ringparam,
3581 .get_channels = ibmvnic_get_channels,
3582 .set_channels = ibmvnic_set_channels,
3583 .get_strings = ibmvnic_get_strings,
3584 .get_sset_count = ibmvnic_get_sset_count,
3585 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3586 .get_link_ksettings = ibmvnic_get_link_ksettings,
3587 };
3588
3589 /* Routines for managing CRQs/sCRQs */
3590
3591 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3592 struct ibmvnic_sub_crq_queue *scrq)
3593 {
3594 int rc;
3595
3596 if (!scrq) {
3597 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3598 return -EINVAL;
3599 }
3600
3601 if (scrq->irq) {
3602 free_irq(scrq->irq, scrq);
3603 irq_dispose_mapping(scrq->irq);
3604 scrq->irq = 0;
3605 }
3606
3607 if (scrq->msgs) {
3608 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3609 atomic_set(&scrq->used, 0);
3610 scrq->cur = 0;
3611 scrq->ind_buf.index = 0;
3612 } else {
3613 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3614 return -EINVAL;
3615 }
3616
3617 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3618 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3619 return rc;
3620 }
3621
3622 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3623 {
3624 int i, rc;
3625
3626 if (!adapter->tx_scrq || !adapter->rx_scrq)
3627 return -EINVAL;
3628
3629 for (i = 0; i < adapter->req_tx_queues; i++) {
3630 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3631 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3632 if (rc)
3633 return rc;
3634 }
3635
3636 for (i = 0; i < adapter->req_rx_queues; i++) {
3637 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3638 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3639 if (rc)
3640 return rc;
3641 }
3642
3643 return rc;
3644 }
3645
3646 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3647 struct ibmvnic_sub_crq_queue *scrq,
3648 bool do_h_free)
3649 {
3650 struct device *dev = &adapter->vdev->dev;
3651 long rc;
3652
3653 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3654
3655 if (do_h_free) {
3656 /* Close the sub-crq */
3657 do {
3658 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3659 adapter->vdev->unit_address,
3660 scrq->crq_num);
3661 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3662
3663 if (rc) {
3664 netdev_err(adapter->netdev,
3665 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3666 scrq->crq_num, rc);
3667 }
3668 }
3669
3670 dma_free_coherent(dev,
3671 IBMVNIC_IND_ARR_SZ,
3672 scrq->ind_buf.indir_arr,
3673 scrq->ind_buf.indir_dma);
3674
3675 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3676 DMA_BIDIRECTIONAL);
3677 free_pages((unsigned long)scrq->msgs, 2);
3678 kfree(scrq);
3679 }
3680
3681 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3682 *adapter)
3683 {
3684 struct device *dev = &adapter->vdev->dev;
3685 struct ibmvnic_sub_crq_queue *scrq;
3686 int rc;
3687
3688 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3689 if (!scrq)
3690 return NULL;
3691
3692 scrq->msgs =
3693 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3694 if (!scrq->msgs) {
3695 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3696 goto zero_page_failed;
3697 }
3698
3699 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3700 DMA_BIDIRECTIONAL);
3701 if (dma_mapping_error(dev, scrq->msg_token)) {
3702 dev_warn(dev, "Couldn't map crq queue messages page\n");
3703 goto map_failed;
3704 }
3705
3706 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3707 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3708
3709 if (rc == H_RESOURCE)
3710 rc = ibmvnic_reset_crq(adapter);
3711
3712 if (rc == H_CLOSED) {
3713 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3714 } else if (rc) {
3715 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3716 goto reg_failed;
3717 }
3718
3719 scrq->adapter = adapter;
3720 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3721 scrq->ind_buf.index = 0;
3722
3723 scrq->ind_buf.indir_arr =
3724 dma_alloc_coherent(dev,
3725 IBMVNIC_IND_ARR_SZ,
3726 &scrq->ind_buf.indir_dma,
3727 GFP_KERNEL);
3728
3729 if (!scrq->ind_buf.indir_arr)
3730 goto indir_failed;
3731
3732 spin_lock_init(&scrq->lock);
3733
3734 netdev_dbg(adapter->netdev,
3735 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3736 scrq->crq_num, scrq->hw_irq, scrq->irq);
3737
3738 return scrq;
3739
3740 indir_failed:
3741 do {
3742 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3743 adapter->vdev->unit_address,
3744 scrq->crq_num);
3745 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3746 reg_failed:
3747 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3748 DMA_BIDIRECTIONAL);
3749 map_failed:
3750 free_pages((unsigned long)scrq->msgs, 2);
3751 zero_page_failed:
3752 kfree(scrq);
3753
3754 return NULL;
3755 }
3756
3757 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3758 {
3759 int i;
3760
3761 if (adapter->tx_scrq) {
3762 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3763 if (!adapter->tx_scrq[i])
3764 continue;
3765
3766 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3767 i);
3768 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
3769 if (adapter->tx_scrq[i]->irq) {
3770 free_irq(adapter->tx_scrq[i]->irq,
3771 adapter->tx_scrq[i]);
3772 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3773 adapter->tx_scrq[i]->irq = 0;
3774 }
3775
3776 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3777 do_h_free);
3778 }
3779
3780 kfree(adapter->tx_scrq);
3781 adapter->tx_scrq = NULL;
3782 adapter->num_active_tx_scrqs = 0;
3783 }
3784
3785 if (adapter->rx_scrq) {
3786 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3787 if (!adapter->rx_scrq[i])
3788 continue;
3789
3790 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3791 i);
3792 if (adapter->rx_scrq[i]->irq) {
3793 free_irq(adapter->rx_scrq[i]->irq,
3794 adapter->rx_scrq[i]);
3795 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3796 adapter->rx_scrq[i]->irq = 0;
3797 }
3798
3799 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3800 do_h_free);
3801 }
3802
3803 kfree(adapter->rx_scrq);
3804 adapter->rx_scrq = NULL;
3805 adapter->num_active_rx_scrqs = 0;
3806 }
3807 }
3808
3809 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3810 struct ibmvnic_sub_crq_queue *scrq)
3811 {
3812 struct device *dev = &adapter->vdev->dev;
3813 unsigned long rc;
3814
3815 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3816 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3817 if (rc)
3818 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3819 scrq->hw_irq, rc);
3820 return rc;
3821 }
3822
3823 /* We can not use the IRQ chip EOI handler because that has the
3824 * unintended effect of changing the interrupt priority.
3825 */
3826 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
3827 {
3828 u64 val = 0xff000000 | scrq->hw_irq;
3829 unsigned long rc;
3830
3831 rc = plpar_hcall_norets(H_EOI, val);
3832 if (rc)
3833 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
3834 }
3835
3836 /* Clear any interrupt pending for this sub-CRQ. Only the XICS
3837 * interrupt controller needs an explicit EOI here; XIVE handles
3838 * this in hardware.
3839 */
3840 static void ibmvnic_clear_pending_interrupt(struct device *dev,
3841 struct ibmvnic_sub_crq_queue *scrq)
3842 {
3843 if (!xive_enabled())
3844 ibmvnic_xics_eoi(dev, scrq);
3845 }
3846
3847 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3848 struct ibmvnic_sub_crq_queue *scrq)
3849 {
3850 struct device *dev = &adapter->vdev->dev;
3851 unsigned long rc;
3852
3853 if (scrq->hw_irq > 0x100000000ULL) {
3854 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3855 return 1;
3856 }
3857
3858 if (test_bit(0, &adapter->resetting) &&
3859 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3860 ibmvnic_clear_pending_interrupt(dev, scrq);
3861 }
3862
3863 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3864 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3865 if (rc)
3866 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3867 scrq->hw_irq, rc);
3868 return rc;
3869 }
3870
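/* Process TX completions for one sub-CRQ: free the completed skbs, return
 * their buffers to the tx pool, update BQL accounting, and wake the
 * subqueue once ring usage drops to half or below.
 */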
3871 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3872 struct ibmvnic_sub_crq_queue *scrq)
3873 {
3874 struct device *dev = &adapter->vdev->dev;
3875 struct ibmvnic_tx_pool *tx_pool;
3876 struct ibmvnic_tx_buff *txbuff;
3877 struct netdev_queue *txq;
3878 union sub_crq *next;
3879 int index;
3880 int i;
3881
3882 restart_loop:
3883 while (pending_scrq(adapter, scrq)) {
3884 unsigned int pool = scrq->pool_index;
3885 int num_entries = 0;
3886 int total_bytes = 0;
3887 int num_packets = 0;
3888
3889 next = ibmvnic_next_scrq(adapter, scrq);
3890 for (i = 0; i < next->tx_comp.num_comps; i++) {
3891 index = be32_to_cpu(next->tx_comp.correlators[i]);
3892 if (index & IBMVNIC_TSO_POOL_MASK) {
3893 tx_pool = &adapter->tso_pool[pool];
3894 index &= ~IBMVNIC_TSO_POOL_MASK;
3895 } else {
3896 tx_pool = &adapter->tx_pool[pool];
3897 }
3898
3899 txbuff = &tx_pool->tx_buff[index];
3900 num_packets++;
3901 num_entries += txbuff->num_entries;
3902 if (txbuff->skb) {
3903 total_bytes += txbuff->skb->len;
3904 if (next->tx_comp.rcs[i]) {
3905 dev_err(dev, "tx error %x\n",
3906 next->tx_comp.rcs[i]);
3907 dev_kfree_skb_irq(txbuff->skb);
3908 } else {
3909 dev_consume_skb_irq(txbuff->skb);
3910 }
3911 txbuff->skb = NULL;
3912 } else {
3913 netdev_warn(adapter->netdev,
3914 "TX completion received with NULL socket buffer\n");
3915 }
3916 tx_pool->free_map[tx_pool->producer_index] = index;
3917 tx_pool->producer_index =
3918 (tx_pool->producer_index + 1) %
3919 tx_pool->num_buffers;
3920 }
3921
3922 next->tx_comp.first = 0;
3923
3924 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3925 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3926
3927 if (atomic_sub_return(num_entries, &scrq->used) <=
3928 (adapter->req_tx_entries_per_subcrq / 2) &&
3929 __netif_subqueue_stopped(adapter->netdev,
3930 scrq->pool_index)) {
3931 rcu_read_lock();
3932 if (adapter->tx_queues_active) {
3933 netif_wake_subqueue(adapter->netdev,
3934 scrq->pool_index);
3935 netdev_dbg(adapter->netdev,
3936 "Started queue %d\n",
3937 scrq->pool_index);
3938 }
3939 rcu_read_unlock();
3940 }
3941 }
3942
3943 enable_scrq_irq(adapter, scrq);
3944
3945 if (pending_scrq(adapter, scrq)) {
3946 disable_scrq_irq(adapter, scrq);
3947 goto restart_loop;
3948 }
3949
3950 return 0;
3951 }
3952
3953 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3954 {
3955 struct ibmvnic_sub_crq_queue *scrq = instance;
3956 struct ibmvnic_adapter *adapter = scrq->adapter;
3957
3958 disable_scrq_irq(adapter, scrq);
3959 ibmvnic_complete_tx(adapter, scrq);
3960
3961 return IRQ_HANDLED;
3962 }
3963
3964 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3965 {
3966 struct ibmvnic_sub_crq_queue *scrq = instance;
3967 struct ibmvnic_adapter *adapter = scrq->adapter;
3968
3969 /* When booting a kdump kernel we can hit pending interrupts
3970 * prior to completing driver initialization.
3971 */
3972 if (unlikely(adapter->state != VNIC_OPEN))
3973 return IRQ_NONE;
3974
3975 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3976
3977 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3978 disable_scrq_irq(adapter, scrq);
3979 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3980 }
3981
3982 return IRQ_HANDLED;
3983 }
3984
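/* Create IRQ mappings and request handlers for every TX and RX sub-CRQ.
 * On failure, free any IRQs already requested and release all sub-CRQs.
 */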
3985 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3986 {
3987 struct device *dev = &adapter->vdev->dev;
3988 struct ibmvnic_sub_crq_queue *scrq;
3989 int i = 0, j = 0;
3990 int rc = 0;
3991
3992 for (i = 0; i < adapter->req_tx_queues; i++) {
3993 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3994 i);
3995 scrq = adapter->tx_scrq[i];
3996 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3997
3998 if (!scrq->irq) {
3999 rc = -EINVAL;
4000 dev_err(dev, "Error mapping irq\n");
4001 goto req_tx_irq_failed;
4002 }
4003
4004 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
4005 adapter->vdev->unit_address, i);
4006 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
4007 0, scrq->name, scrq);
4008
4009 if (rc) {
4010 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
4011 scrq->irq, rc);
4012 irq_dispose_mapping(scrq->irq);
4013 goto req_tx_irq_failed;
4014 }
4015 }
4016
4017 for (i = 0; i < adapter->req_rx_queues; i++) {
4018 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
4019 i);
4020 scrq = adapter->rx_scrq[i];
4021 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4022 if (!scrq->irq) {
4023 rc = -EINVAL;
4024 dev_err(dev, "Error mapping irq\n");
4025 goto req_rx_irq_failed;
4026 }
4027 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
4028 adapter->vdev->unit_address, i);
4029 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
4030 0, scrq->name, scrq);
4031 if (rc) {
4032 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
4033 scrq->irq, rc);
4034 irq_dispose_mapping(scrq->irq);
4035 goto req_rx_irq_failed;
4036 }
4037 }
4038 return rc;
4039
4040 req_rx_irq_failed:
4041 for (j = 0; j < i; j++) {
4042 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
4043 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
4044 }
4045 i = adapter->req_tx_queues;
4046 req_tx_irq_failed:
4047 for (j = 0; j < i; j++) {
4048 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
4049 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
4050 }
4051 release_sub_crqs(adapter, 1);
4052 return rc;
4053 }
4054
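/* Allocate and register the requested number of TX and RX sub-CRQs. If
 * fewer queues than requested (but at least the minimum) are registered,
 * scale the requested queue counts down and split the registered queues
 * between TX and RX.
 */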
4055 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
4056 {
4057 struct device *dev = &adapter->vdev->dev;
4058 struct ibmvnic_sub_crq_queue **allqueues;
4059 int registered_queues = 0;
4060 int total_queues;
4061 int more = 0;
4062 int i;
4063
4064 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
4065
4066 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
4067 if (!allqueues)
4068 return -ENOMEM;
4069
4070 for (i = 0; i < total_queues; i++) {
4071 allqueues[i] = init_sub_crq_queue(adapter);
4072 if (!allqueues[i]) {
4073 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
4074 break;
4075 }
4076 registered_queues++;
4077 }
4078
4079 /* Make sure they were allocated appropriately */
4080 if (registered_queues <
4081 adapter->min_tx_queues + adapter->min_rx_queues) {
4082 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
4083 goto tx_failed;
4084 }
4085
4086 /* Reduce the requested queue counts by the number of queues that failed to allocate */
4087 for (i = 0; i < total_queues - registered_queues + more ; i++) {
4088 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
4089 switch (i % 3) {
4090 case 0:
4091 if (adapter->req_rx_queues > adapter->min_rx_queues)
4092 adapter->req_rx_queues--;
4093 else
4094 more++;
4095 break;
4096 case 1:
4097 if (adapter->req_tx_queues > adapter->min_tx_queues)
4098 adapter->req_tx_queues--;
4099 else
4100 more++;
4101 break;
4102 }
4103 }
4104
4105 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
4106 sizeof(*adapter->tx_scrq), GFP_KERNEL);
4107 if (!adapter->tx_scrq)
4108 goto tx_failed;
4109
4110 for (i = 0; i < adapter->req_tx_queues; i++) {
4111 adapter->tx_scrq[i] = allqueues[i];
4112 adapter->tx_scrq[i]->pool_index = i;
4113 adapter->num_active_tx_scrqs++;
4114 }
4115
4116 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
4117 sizeof(*adapter->rx_scrq), GFP_KERNEL);
4118 if (!adapter->rx_scrq)
4119 goto rx_failed;
4120
4121 for (i = 0; i < adapter->req_rx_queues; i++) {
4122 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
4123 adapter->rx_scrq[i]->scrq_num = i;
4124 adapter->num_active_rx_scrqs++;
4125 }
4126
4127 kfree(allqueues);
4128 return 0;
4129
4130 rx_failed:
4131 kfree(adapter->tx_scrq);
4132 adapter->tx_scrq = NULL;
4133 tx_failed:
4134 for (i = 0; i < registered_queues; i++)
4135 release_sub_crq_queue(adapter, allqueues[i], 1);
4136 kfree(allqueues);
4137 return -ENOMEM;
4138 }
4139
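/* Send the REQUEST_CAPABILITY CRQs that tell the firmware which queue
 * counts, ring sizes, MTU and promiscuous setting this driver wants. On the
 * first (non-retry) pass the requested values are derived from the
 * negotiated limits and any user-desired settings.
 */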
4140 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
4141 {
4142 struct device *dev = &adapter->vdev->dev;
4143 union ibmvnic_crq crq;
4144 int max_entries;
4145 int cap_reqs;
4146
4147 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
4148 * the PROMISC flag), so initialize running_cap_crqs up front. The
4149 * response handler uses this count to know when every capability
4150 * request has been answered.
4151 */
4152 if (!(adapter->netdev->flags & IFF_PROMISC) ||
4153 adapter->promisc_supported)
4154 cap_reqs = 7;
4155 else
4156 cap_reqs = 6;
4157
4158 if (!retry) {
4159 /* Sub-CRQ entries are 32 bytes long */
4160 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
4161
4162 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4163
4164 if (adapter->min_tx_entries_per_subcrq > entries_page ||
4165 adapter->min_rx_add_entries_per_subcrq > entries_page) {
4166 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
4167 return;
4168 }
4169
4170 if (adapter->desired.mtu)
4171 adapter->req_mtu = adapter->desired.mtu;
4172 else
4173 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4174
4175 if (!adapter->desired.tx_entries)
4176 adapter->desired.tx_entries =
4177 adapter->max_tx_entries_per_subcrq;
4178 if (!adapter->desired.rx_entries)
4179 adapter->desired.rx_entries =
4180 adapter->max_rx_add_entries_per_subcrq;
4181
4182 max_entries = IBMVNIC_LTB_SET_SIZE /
4183 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
4184
4185 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4186 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
4187 adapter->desired.tx_entries = max_entries;
4188 }
4189
4190 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4191 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
4192 adapter->desired.rx_entries = max_entries;
4193 }
4194
4195 if (adapter->desired.tx_entries)
4196 adapter->req_tx_entries_per_subcrq =
4197 adapter->desired.tx_entries;
4198 else
4199 adapter->req_tx_entries_per_subcrq =
4200 adapter->max_tx_entries_per_subcrq;
4201
4202 if (adapter->desired.rx_entries)
4203 adapter->req_rx_add_entries_per_subcrq =
4204 adapter->desired.rx_entries;
4205 else
4206 adapter->req_rx_add_entries_per_subcrq =
4207 adapter->max_rx_add_entries_per_subcrq;
4208
4209 if (adapter->desired.tx_queues)
4210 adapter->req_tx_queues =
4211 adapter->desired.tx_queues;
4212 else
4213 adapter->req_tx_queues =
4214 adapter->opt_tx_comp_sub_queues;
4215
4216 if (adapter->desired.rx_queues)
4217 adapter->req_rx_queues =
4218 adapter->desired.rx_queues;
4219 else
4220 adapter->req_rx_queues =
4221 adapter->opt_rx_comp_queues;
4222
4223 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4224 } else {
4225 atomic_add(cap_reqs, &adapter->running_cap_crqs);
4226 }
4227 memset(&crq, 0, sizeof(crq));
4228 crq.request_capability.first = IBMVNIC_CRQ_CMD;
4229 crq.request_capability.cmd = REQUEST_CAPABILITY;
4230
4231 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4232 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4233 cap_reqs--;
4234 ibmvnic_send_crq(adapter, &crq);
4235
4236 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4237 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4238 cap_reqs--;
4239 ibmvnic_send_crq(adapter, &crq);
4240
4241 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4242 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4243 cap_reqs--;
4244 ibmvnic_send_crq(adapter, &crq);
4245
4246 crq.request_capability.capability =
4247 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4248 crq.request_capability.number =
4249 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4250 cap_reqs--;
4251 ibmvnic_send_crq(adapter, &crq);
4252
4253 crq.request_capability.capability =
4254 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4255 crq.request_capability.number =
4256 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4257 cap_reqs--;
4258 ibmvnic_send_crq(adapter, &crq);
4259
4260 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4261 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4262 cap_reqs--;
4263 ibmvnic_send_crq(adapter, &crq);
4264
4265 if (adapter->netdev->flags & IFF_PROMISC) {
4266 if (adapter->promisc_supported) {
4267 crq.request_capability.capability =
4268 cpu_to_be16(PROMISC_REQUESTED);
4269 crq.request_capability.number = cpu_to_be64(1);
4270 cap_reqs--;
4271 ibmvnic_send_crq(adapter, &crq);
4272 }
4273 } else {
4274 crq.request_capability.capability =
4275 cpu_to_be16(PROMISC_REQUESTED);
4276 crq.request_capability.number = cpu_to_be64(0);
4277 cap_reqs--;
4278 ibmvnic_send_crq(adapter, &crq);
4279 }
4280
4281 /* Each request above decremented cap_reqs; if the count is not zero
4282 * here, a request was skipped or sent twice.
4283 */
4284 WARN_ON(cap_reqs != 0);
4285 }
4286
4287 static int pending_scrq(struct ibmvnic_adapter *adapter,
4288 struct ibmvnic_sub_crq_queue *scrq)
4289 {
4290 union sub_crq *entry = &scrq->msgs[scrq->cur];
4291 int rc;
4292
4293 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4294
4295 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4296 * contents of the SCRQ descriptor
4297 */
4298 dma_rmb();
4299
4300 return rc;
4301 }
4302
4303 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4304 struct ibmvnic_sub_crq_queue *scrq)
4305 {
4306 union sub_crq *entry;
4307 unsigned long flags;
4308
4309 spin_lock_irqsave(&scrq->lock, flags);
4310 entry = &scrq->msgs[scrq->cur];
4311 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4312 if (++scrq->cur == scrq->size)
4313 scrq->cur = 0;
4314 } else {
4315 entry = NULL;
4316 }
4317 spin_unlock_irqrestore(&scrq->lock, flags);
4318
4319 /* Ensure that the entire buffer descriptor has been
4320 * loaded before reading its contents
4321 */
4322 dma_rmb();
4323
4324 return entry;
4325 }
4326
4327 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4328 {
4329 struct ibmvnic_crq_queue *queue = &adapter->crq;
4330 union ibmvnic_crq *crq;
4331
4332 crq = &queue->msgs[queue->cur];
4333 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4334 if (++queue->cur == queue->size)
4335 queue->cur = 0;
4336 } else {
4337 crq = NULL;
4338 }
4339
4340 return crq;
4341 }
4342
4343 static void print_subcrq_error(struct device *dev, int rc, const char *func)
4344 {
4345 switch (rc) {
4346 case H_PARAMETER:
4347 dev_warn_ratelimited(dev,
4348 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4349 func, rc);
4350 break;
4351 case H_CLOSED:
4352 dev_warn_ratelimited(dev,
4353 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4354 func, rc);
4355 break;
4356 default:
4357 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4358 break;
4359 }
4360 }
4361
4362 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4363 u64 remote_handle, u64 ioba, u64 num_entries)
4364 {
4365 unsigned int ua = adapter->vdev->unit_address;
4366 struct device *dev = &adapter->vdev->dev;
4367 int rc;
4368
4369 /* Make sure the hypervisor sees the complete request */
4370 dma_wmb();
4371 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4372 cpu_to_be64(remote_handle),
4373 ioba, num_entries);
4374
4375 if (rc)
4376 print_subcrq_error(dev, rc, __func__);
4377
4378 return rc;
4379 }
4380
4381 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4382 union ibmvnic_crq *crq)
4383 {
4384 unsigned int ua = adapter->vdev->unit_address;
4385 struct device *dev = &adapter->vdev->dev;
4386 u64 *u64_crq = (u64 *)crq;
4387 int rc;
4388
4389 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4390 (unsigned long)cpu_to_be64(u64_crq[0]),
4391 (unsigned long)cpu_to_be64(u64_crq[1]));
4392
4393 if (!adapter->crq.active &&
4394 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4395 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4396 return -EINVAL;
4397 }
4398
4399 /* Make sure the hypervisor sees the complete request */
4400 dma_wmb();
4401
4402 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4403 cpu_to_be64(u64_crq[0]),
4404 cpu_to_be64(u64_crq[1]));
4405
4406 if (rc) {
4407 if (rc == H_CLOSED) {
4408 dev_warn(dev, "CRQ Queue closed\n");
4409 /* do not reset, report the fail, wait for passive init from server */
4410 }
4411
4412 dev_warn(dev, "Send error (rc=%d)\n", rc);
4413 }
4414
4415 return rc;
4416 }
4417
4418 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4419 {
4420 struct device *dev = &adapter->vdev->dev;
4421 union ibmvnic_crq crq;
4422 int retries = 100;
4423 int rc;
4424
4425 memset(&crq, 0, sizeof(crq));
4426 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4427 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4428 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4429
4430 do {
4431 rc = ibmvnic_send_crq(adapter, &crq);
4432 if (rc != H_CLOSED)
4433 break;
4434 retries--;
4435 msleep(50);
4436
4437 } while (retries > 0);
4438
4439 if (rc) {
4440 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4441 return rc;
4442 }
4443
4444 return 0;
4445 }
4446
4447 struct vnic_login_client_data {
4448 u8 type;
4449 __be16 len;
4450 char name[];
4451 } __packed;
4452
4453 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4454 {
4455 int len;
4456
4457 /* Buffer space needed for the vNIC client data in the login
4458  * buffer: four entry headers plus the NUL-terminated strings
4459  * filled in by vnic_add_client_data(): the OS name "Linux",
4460  * the LPAR node name and the netdev name. */
4461 len = 4 * sizeof(struct vnic_login_client_data);
4462 len += 6; /* strlen("Linux") + 1 */
4463 len += strlen(utsname()->nodename) + 1;
4464 len += strlen(adapter->netdev->name) + 1;
4465
4466 return len;
4467 }
4468
4469 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4470 struct vnic_login_client_data *vlcd)
4471 {
4472 const char *os_name = "Linux";
4473 int len;
4474
4475 /* Type 1 - operating system name */
4476 vlcd->type = 1;
4477 len = strlen(os_name) + 1;
4478 vlcd->len = cpu_to_be16(len);
4479 strscpy(vlcd->name, os_name, len);
4480 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4481
4482 /* Type 2 - LPAR node (host) name */
4483 vlcd->type = 2;
4484 len = strlen(utsname()->nodename) + 1;
4485 vlcd->len = cpu_to_be16(len);
4486 strscpy(vlcd->name, utsname()->nodename, len);
4487 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4488
4489 /* Type 3 - device name */
4490 vlcd->type = 3;
4491 len = strlen(adapter->netdev->name) + 1;
4492 vlcd->len = cpu_to_be16(len);
4493 strscpy(vlcd->name, adapter->netdev->name, len);
4494 }
4495
4496 static int send_login(struct ibmvnic_adapter *adapter)
4497 {
4498 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4499 struct ibmvnic_login_buffer *login_buffer;
4500 struct device *dev = &adapter->vdev->dev;
4501 struct vnic_login_client_data *vlcd;
4502 dma_addr_t rsp_buffer_token;
4503 dma_addr_t buffer_token;
4504 size_t rsp_buffer_size;
4505 union ibmvnic_crq crq;
4506 int client_data_len;
4507 size_t buffer_size;
4508 __be64 *tx_list_p;
4509 __be64 *rx_list_p;
4510 int rc;
4511 int i;
4512
4513 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4514 netdev_err(adapter->netdev,
4515 "RX or TX queues are not allocated, device login failed\n");
4516 return -ENOMEM;
4517 }
4518
4519 release_login_buffer(adapter);
4520 release_login_rsp_buffer(adapter);
4521
4522 client_data_len = vnic_client_data_len(adapter);
4523
4524 buffer_size =
4525 sizeof(struct ibmvnic_login_buffer) +
4526 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4527 client_data_len;
4528
4529 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4530 if (!login_buffer)
4531 goto buf_alloc_failed;
4532
4533 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4534 DMA_TO_DEVICE);
4535 if (dma_mapping_error(dev, buffer_token)) {
4536 dev_err(dev, "Couldn't map login buffer\n");
4537 goto buf_map_failed;
4538 }
4539
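/* The response buffer holds the header, one handle per TX sub-CRQ,
 * one handle and one buffer size per RX sub-CRQ, and the list of
 * supported TX descriptor versions.
 */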
4540 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4541 sizeof(u64) * adapter->req_tx_queues +
4542 sizeof(u64) * adapter->req_rx_queues +
4543 sizeof(u64) * adapter->req_rx_queues +
4544 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4545
4546 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4547 if (!login_rsp_buffer)
4548 goto buf_rsp_alloc_failed;
4549
4550 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4551 rsp_buffer_size, DMA_FROM_DEVICE);
4552 if (dma_mapping_error(dev, rsp_buffer_token)) {
4553 dev_err(dev, "Couldn't map login rsp buffer\n");
4554 goto buf_rsp_map_failed;
4555 }
4556
4557 adapter->login_buf = login_buffer;
4558 adapter->login_buf_token = buffer_token;
4559 adapter->login_buf_sz = buffer_size;
4560 adapter->login_rsp_buf = login_rsp_buffer;
4561 adapter->login_rsp_buf_token = rsp_buffer_token;
4562 adapter->login_rsp_buf_sz = rsp_buffer_size;
4563
4564 login_buffer->len = cpu_to_be32(buffer_size);
4565 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4566 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4567 login_buffer->off_txcomp_subcrqs =
4568 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4569 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4570 login_buffer->off_rxcomp_subcrqs =
4571 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4572 sizeof(u64) * adapter->req_tx_queues);
4573 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4574 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4575
4576 tx_list_p = (__be64 *)((char *)login_buffer +
4577 sizeof(struct ibmvnic_login_buffer));
4578 rx_list_p = (__be64 *)((char *)login_buffer +
4579 sizeof(struct ibmvnic_login_buffer) +
4580 sizeof(u64) * adapter->req_tx_queues);
4581
4582 for (i = 0; i < adapter->req_tx_queues; i++) {
4583 if (adapter->tx_scrq[i]) {
4584 tx_list_p[i] =
4585 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4586 }
4587 }
4588
4589 for (i = 0; i < adapter->req_rx_queues; i++) {
4590 if (adapter->rx_scrq[i]) {
4591 rx_list_p[i] =
4592 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4593 }
4594 }
4595
4596 /* The client data follows the RX queue list in the login buffer */
4597 vlcd = (struct vnic_login_client_data *)
4598 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4599 login_buffer->client_data_offset =
4600 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4601 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4602
4603 vnic_add_client_data(adapter, vlcd);
4604
4605 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4606 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4607 netdev_dbg(adapter->netdev, "%016lx\n",
4608 ((unsigned long *)(adapter->login_buf))[i]);
4609 }
4610
4611 memset(&crq, 0, sizeof(crq));
4612 crq.login.first = IBMVNIC_CRQ_CMD;
4613 crq.login.cmd = LOGIN;
4614 crq.login.ioba = cpu_to_be32(buffer_token);
4615 crq.login.len = cpu_to_be32(buffer_size);
4616
4617 adapter->login_pending = true;
4618 rc = ibmvnic_send_crq(adapter, &crq);
4619 if (rc) {
4620 adapter->login_pending = false;
4621 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4622 goto buf_rsp_map_failed;
4623 }
4624
4625 return 0;
4626
4627 buf_rsp_map_failed:
4628 kfree(login_rsp_buffer);
4629 adapter->login_rsp_buf = NULL;
4630 buf_rsp_alloc_failed:
4631 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4632 buf_map_failed:
4633 kfree(login_buffer);
4634 adapter->login_buf = NULL;
4635 buf_alloc_failed:
4636 return -ENOMEM;
4637 }
4638
4639 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4640 u32 len, u8 map_id)
4641 {
4642 union ibmvnic_crq crq;
4643
4644 memset(&crq, 0, sizeof(crq));
4645 crq.request_map.first = IBMVNIC_CRQ_CMD;
4646 crq.request_map.cmd = REQUEST_MAP;
4647 crq.request_map.map_id = map_id;
4648 crq.request_map.ioba = cpu_to_be32(addr);
4649 crq.request_map.len = cpu_to_be32(len);
4650 return ibmvnic_send_crq(adapter, &crq);
4651 }
4652
4653 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4654 {
4655 union ibmvnic_crq crq;
4656
4657 memset(&crq, 0, sizeof(crq));
4658 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4659 crq.request_unmap.cmd = REQUEST_UNMAP;
4660 crq.request_unmap.map_id = map_id;
4661 return ibmvnic_send_crq(adapter, &crq);
4662 }
4663
4664 static void send_query_map(struct ibmvnic_adapter *adapter)
4665 {
4666 union ibmvnic_crq crq;
4667
4668 memset(&crq, 0, sizeof(crq));
4669 crq.query_map.first = IBMVNIC_CRQ_CMD;
4670 crq.query_map.cmd = QUERY_MAP;
4671 ibmvnic_send_crq(adapter, &crq);
4672 }
4673
4674 /* Send a series of CRQs requesting various capabilities of the VNIC server */
4675 static void send_query_cap(struct ibmvnic_adapter *adapter)
4676 {
4677 union ibmvnic_crq crq;
4678 int cap_reqs;
4679
4680 /* 25 QUERY_CAPABILITY CRQs are sent below.  Initialize the
4681  * outstanding-request count up front so that the tasklet can
4682  * tell when the last response has arrived and move on to the
4683  * REQUEST_CAPABILITY stage. */
4684 cap_reqs = 25;
4685
4686 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4687
4688 memset(&crq, 0, sizeof(crq));
4689 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4690 crq.query_capability.cmd = QUERY_CAPABILITY;
4691
4692 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4693 ibmvnic_send_crq(adapter, &crq);
4694 cap_reqs--;
4695
4696 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4697 ibmvnic_send_crq(adapter, &crq);
4698 cap_reqs--;
4699
4700 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4701 ibmvnic_send_crq(adapter, &crq);
4702 cap_reqs--;
4703
4704 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4705 ibmvnic_send_crq(adapter, &crq);
4706 cap_reqs--;
4707
4708 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4709 ibmvnic_send_crq(adapter, &crq);
4710 cap_reqs--;
4711
4712 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4713 ibmvnic_send_crq(adapter, &crq);
4714 cap_reqs--;
4715
4716 crq.query_capability.capability =
4717 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4718 ibmvnic_send_crq(adapter, &crq);
4719 cap_reqs--;
4720
4721 crq.query_capability.capability =
4722 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4723 ibmvnic_send_crq(adapter, &crq);
4724 cap_reqs--;
4725
4726 crq.query_capability.capability =
4727 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4728 ibmvnic_send_crq(adapter, &crq);
4729 cap_reqs--;
4730
4731 crq.query_capability.capability =
4732 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4733 ibmvnic_send_crq(adapter, &crq);
4734 cap_reqs--;
4735
4736 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4737 ibmvnic_send_crq(adapter, &crq);
4738 cap_reqs--;
4739
4740 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4741 ibmvnic_send_crq(adapter, &crq);
4742 cap_reqs--;
4743
4744 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4745 ibmvnic_send_crq(adapter, &crq);
4746 cap_reqs--;
4747
4748 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4749 ibmvnic_send_crq(adapter, &crq);
4750 cap_reqs--;
4751
4752 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4753 ibmvnic_send_crq(adapter, &crq);
4754 cap_reqs--;
4755
4756 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4757 ibmvnic_send_crq(adapter, &crq);
4758 cap_reqs--;
4759
4760 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4761 ibmvnic_send_crq(adapter, &crq);
4762 cap_reqs--;
4763
4764 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4765 ibmvnic_send_crq(adapter, &crq);
4766 cap_reqs--;
4767
4768 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4769 ibmvnic_send_crq(adapter, &crq);
4770 cap_reqs--;
4771
4772 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4773 ibmvnic_send_crq(adapter, &crq);
4774 cap_reqs--;
4775
4776 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4777 ibmvnic_send_crq(adapter, &crq);
4778 cap_reqs--;
4779
4780 crq.query_capability.capability =
4781 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4782 ibmvnic_send_crq(adapter, &crq);
4783 cap_reqs--;
4784
4785 crq.query_capability.capability =
4786 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4787 ibmvnic_send_crq(adapter, &crq);
4788 cap_reqs--;
4789
4790 crq.query_capability.capability =
4791 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4792 ibmvnic_send_crq(adapter, &crq);
4793 cap_reqs--;
4794
4795 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4796
4797 ibmvnic_send_crq(adapter, &crq);
4798 cap_reqs--;
4799
4800 /* Keep this check at the end so it catches any discrepancy
4801  * between the number of capability CRQs expected and the
4802  * number actually sent. */
4803 WARN_ON(cap_reqs != 0);
4804 }
4805
4806 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4807 {
4808 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4809 struct device *dev = &adapter->vdev->dev;
4810 union ibmvnic_crq crq;
4811
4812 adapter->ip_offload_tok =
4813 dma_map_single(dev,
4814 &adapter->ip_offload_buf,
4815 buf_sz,
4816 DMA_FROM_DEVICE);
4817
4818 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4819 if (!firmware_has_feature(FW_FEATURE_CMO))
4820 dev_err(dev, "Couldn't map offload buffer\n");
4821 return;
4822 }
4823
4824 memset(&crq, 0, sizeof(crq));
4825 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4826 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4827 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4828 crq.query_ip_offload.ioba =
4829 cpu_to_be32(adapter->ip_offload_tok);
4830
4831 ibmvnic_send_crq(adapter, &crq);
4832 }
4833
4834 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4835 {
4836 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4837 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4838 struct device *dev = &adapter->vdev->dev;
4839 netdev_features_t old_hw_features = 0;
4840 union ibmvnic_crq crq;
4841
4842 adapter->ip_offload_ctrl_tok =
4843 dma_map_single(dev,
4844 ctrl_buf,
4845 sizeof(adapter->ip_offload_ctrl),
4846 DMA_TO_DEVICE);
4847
4848 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4849 dev_err(dev, "Couldn't map ip offload control buffer\n");
4850 return;
4851 }
4852
4853 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4854 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4855 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4856 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4857 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4858 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4859 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4860 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4861 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4862 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4863
4864 /* large_rx is disabled for now; additional features are needed to support it */
4865 ctrl_buf->large_rx_ipv4 = 0;
4866 ctrl_buf->large_rx_ipv6 = 0;
4867
4868 if (adapter->state != VNIC_PROBING) {
4869 old_hw_features = adapter->netdev->hw_features;
4870 adapter->netdev->hw_features = 0;
4871 }
4872
4873 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4874
4875 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4876 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4877
4878 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4879 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4880
4881 if ((adapter->netdev->features &
4882 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4883 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4884
4885 if (buf->large_tx_ipv4)
4886 adapter->netdev->hw_features |= NETIF_F_TSO;
4887 if (buf->large_tx_ipv6)
4888 adapter->netdev->hw_features |= NETIF_F_TSO6;
4889
4890 if (adapter->state == VNIC_PROBING) {
4891 adapter->netdev->features |= adapter->netdev->hw_features;
4892 } else if (old_hw_features != adapter->netdev->hw_features) {
4893 netdev_features_t tmp = 0;
4894
4895 /* disable features that are no longer supported */
4896 adapter->netdev->features &= adapter->netdev->hw_features;
4897 /* turn features back on if they are now supported and were previously enabled */
4898 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4899 adapter->netdev->hw_features;
4900 adapter->netdev->features |=
4901 tmp & adapter->netdev->wanted_features;
4902 }
4903
4904 memset(&crq, 0, sizeof(crq));
4905 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4906 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4907 crq.control_ip_offload.len =
4908 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4909 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4910 ibmvnic_send_crq(adapter, &crq);
4911 }
4912
4913 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4914 struct ibmvnic_adapter *adapter)
4915 {
4916 struct device *dev = &adapter->vdev->dev;
4917
4918 if (crq->get_vpd_size_rsp.rc.code) {
4919 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4920 crq->get_vpd_size_rsp.rc.code);
4921 complete(&adapter->fw_done);
4922 return;
4923 }
4924
4925 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4926 complete(&adapter->fw_done);
4927 }
4928
4929 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4930 struct ibmvnic_adapter *adapter)
4931 {
4932 struct device *dev = &adapter->vdev->dev;
4933 unsigned char *substr = NULL;
4934 u8 fw_level_len = 0;
4935
4936 memset(adapter->fw_version, 0, 32);
4937
4938 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4939 DMA_FROM_DEVICE);
4940
4941 if (crq->get_vpd_rsp.rc.code) {
4942 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4943 crq->get_vpd_rsp.rc.code);
4944 goto complete;
4945 }
4946
4947 /* Locate the firmware level string, which follows the ASCII
4948  * "RM" keyword in the VPD buffer when the VIOS provides it.
4949  */
4950 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4951 if (!substr) {
4952 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4953 goto complete;
4954 }
4955
4956 /* read the length of the firmware level string */
4957 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4958 fw_level_len = *(substr + 2);
4959 } else {
4960 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4961 goto complete;
4962 }
4963
4964 /* copy the firmware version string from the VPD into the adapter */
4965 if ((substr + 3 + fw_level_len) <
4966 (adapter->vpd->buff + adapter->vpd->len)) {
4967 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4968 } else {
4969 dev_info(dev, "FW substr extrapolated VPD buff\n");
4970 }
4971
4972 complete:
4973 if (adapter->fw_version[0] == '\0')
4974 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
4975 complete(&adapter->fw_done);
4976 }
4977
4978 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4979 {
4980 struct device *dev = &adapter->vdev->dev;
4981 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4982 int i;
4983
4984 dma_unmap_single(dev, adapter->ip_offload_tok,
4985 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4986
4987 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4988 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4989 netdev_dbg(adapter->netdev, "%016lx\n",
4990 ((unsigned long *)(buf))[i]);
4991
4992 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4993 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4994 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4995 buf->tcp_ipv4_chksum);
4996 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4997 buf->tcp_ipv6_chksum);
4998 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4999 buf->udp_ipv4_chksum);
5000 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
5001 buf->udp_ipv6_chksum);
5002 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
5003 buf->large_tx_ipv4);
5004 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
5005 buf->large_tx_ipv6);
5006 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
5007 buf->large_rx_ipv4);
5008 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
5009 buf->large_rx_ipv6);
5010 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
5011 buf->max_ipv4_header_size);
5012 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
5013 buf->max_ipv6_header_size);
5014 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
5015 buf->max_tcp_header_size);
5016 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
5017 buf->max_udp_header_size);
5018 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
5019 buf->max_large_tx_size);
5020 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
5021 buf->max_large_rx_size);
5022 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
5023 buf->ipv6_extension_header);
5024 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
5025 buf->tcp_pseudosum_req);
5026 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
5027 buf->num_ipv6_ext_headers);
5028 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
5029 buf->off_ipv6_ext_headers);
5030
5031 send_control_ip_offload(adapter);
5032 }
5033
5034 static const char *ibmvnic_fw_err_cause(u16 cause)
5035 {
5036 switch (cause) {
5037 case ADAPTER_PROBLEM:
5038 return "adapter problem";
5039 case BUS_PROBLEM:
5040 return "bus problem";
5041 case FW_PROBLEM:
5042 return "firmware problem";
5043 case DD_PROBLEM:
5044 return "device driver problem";
5045 case EEH_RECOVERY:
5046 return "EEH recovery";
5047 case FW_UPDATED:
5048 return "firmware updated";
5049 case LOW_MEMORY:
5050 return "low Memory";
5051 default:
5052 return "unknown";
5053 }
5054 }
5055
5056 static void handle_error_indication(union ibmvnic_crq *crq,
5057 struct ibmvnic_adapter *adapter)
5058 {
5059 struct device *dev = &adapter->vdev->dev;
5060 u16 cause;
5061
5062 cause = be16_to_cpu(crq->error_indication.error_cause);
5063
5064 dev_warn_ratelimited(dev,
5065 "Firmware reports %serror, cause: %s. Starting recovery...\n",
5066 crq->error_indication.flags
5067 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
5068 ibmvnic_fw_err_cause(cause));
5069
5070 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
5071 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5072 else
5073 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
5074 }
5075
5076 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
5077 struct ibmvnic_adapter *adapter)
5078 {
5079 struct net_device *netdev = adapter->netdev;
5080 struct device *dev = &adapter->vdev->dev;
5081 long rc;
5082
5083 rc = crq->change_mac_addr_rsp.rc.code;
5084 if (rc) {
5085 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
5086 goto out;
5087 }
5088
5089 /* crq->change_mac_addr_rsp.mac_addr holds the address that the
5090  * server actually accepted; adopt it as the device address. */
5091 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
5092 ether_addr_copy(adapter->mac_addr,
5093 &crq->change_mac_addr_rsp.mac_addr[0]);
5094 out:
5095 complete(&adapter->fw_done);
5096 return rc;
5097 }
5098
5099 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
5100 struct ibmvnic_adapter *adapter)
5101 {
5102 struct device *dev = &adapter->vdev->dev;
5103 u64 *req_value;
5104 char *name;
5105
5106 atomic_dec(&adapter->running_cap_crqs);
5107 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
5108 atomic_read(&adapter->running_cap_crqs));
5109 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
5110 case REQ_TX_QUEUES:
5111 req_value = &adapter->req_tx_queues;
5112 name = "tx";
5113 break;
5114 case REQ_RX_QUEUES:
5115 req_value = &adapter->req_rx_queues;
5116 name = "rx";
5117 break;
5118 case REQ_RX_ADD_QUEUES:
5119 req_value = &adapter->req_rx_add_queues;
5120 name = "rx_add";
5121 break;
5122 case REQ_TX_ENTRIES_PER_SUBCRQ:
5123 req_value = &adapter->req_tx_entries_per_subcrq;
5124 name = "tx_entries_per_subcrq";
5125 break;
5126 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
5127 req_value = &adapter->req_rx_add_entries_per_subcrq;
5128 name = "rx_add_entries_per_subcrq";
5129 break;
5130 case REQ_MTU:
5131 req_value = &adapter->req_mtu;
5132 name = "mtu";
5133 break;
5134 case PROMISC_REQUESTED:
5135 req_value = &adapter->promisc;
5136 name = "promisc";
5137 break;
5138 default:
5139 dev_err(dev, "Got invalid cap request rsp %d\n",
5140 crq->request_capability.capability);
5141 return;
5142 }
5143
5144 switch (crq->request_capability_rsp.rc.code) {
5145 case SUCCESS:
5146 break;
5147 case PARTIALSUCCESS:
5148 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
5149 *req_value,
5150 (long)be64_to_cpu(crq->request_capability_rsp.number),
5151 name);
5152
5153 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
5154 REQ_MTU) {
5155 pr_err("mtu of %llu is not supported. Reverting.\n",
5156 *req_value);
5157 *req_value = adapter->fallback.mtu;
5158 } else {
5159 *req_value =
5160 be64_to_cpu(crq->request_capability_rsp.number);
5161 }
5162
5163 send_request_cap(adapter, 1);
5164 return;
5165 default:
5166 dev_err(dev, "Error %d in request cap rsp\n",
5167 crq->request_capability_rsp.rc.code);
5168 return;
5169 }
5170
5171 /* Done receiving requested capabilities, query IP offload support */
5172 if (atomic_read(&adapter->running_cap_crqs) == 0)
5173 send_query_ip_offload(adapter);
5174 }
5175
5176 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
5177 struct ibmvnic_adapter *adapter)
5178 {
5179 struct device *dev = &adapter->vdev->dev;
5180 struct net_device *netdev = adapter->netdev;
5181 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
5182 struct ibmvnic_login_buffer *login = adapter->login_buf;
5183 u64 *tx_handle_array;
5184 u64 *rx_handle_array;
5185 int num_tx_pools;
5186 int num_rx_pools;
5187 u64 *size_array;
5188 int i;
5189
5190 /* The test and clear of login_pending does not need to be
5191  * atomic because only the CRQ tasklet tests and clears it.
5192  */
5193 if (!adapter->login_pending) {
5194 netdev_warn(netdev, "Ignoring unexpected login response\n");
5195 return 0;
5196 }
5197 adapter->login_pending = false;
5198
5199 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
5200 DMA_TO_DEVICE);
5201 dma_unmap_single(dev, adapter->login_rsp_buf_token,
5202 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
5203
5204 /* If the server cannot satisfy the number of queues that were
5205  * requested, the login response carries a non-zero return code
5206  * and the login buffer must be resent with fewer queues
5207  * requested. */
5208 if (login_rsp_crq->generic.rc.code) {
5209 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
5210 complete(&adapter->init_done);
5211 return 0;
5212 }
5213
5214 if (adapter->failover_pending) {
5215 adapter->init_done_rc = -EAGAIN;
5216 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
5217 complete(&adapter->init_done);
5218
5219 return 0;
5220 }
5221
5222 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5223
5224 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
5225 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
5226 netdev_dbg(adapter->netdev, "%016lx\n",
5227 ((unsigned long *)(adapter->login_rsp_buf))[i]);
5228 }
5229
5230 /* Sanity check: the response must match what was requested */
5231 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
5232 (be32_to_cpu(login->num_rxcomp_subcrqs) *
5233 adapter->req_rx_add_queues !=
5234 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
5235 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
5236 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5237 return -EIO;
5238 }
5239 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5240 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
5241
5242 /* variable buffer sizes are not supported, so just read the
5243  * first entry */
5244 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
5245
5246 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
5247 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5248
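/* The response carries arrays of sub-CRQ handles at the offsets given
 * in its header; record one handle per active TX and RX queue.
 */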
5249 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5250 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
5251 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5252 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
5253
5254 for (i = 0; i < num_tx_pools; i++)
5255 adapter->tx_scrq[i]->handle = tx_handle_array[i];
5256
5257 for (i = 0; i < num_rx_pools; i++)
5258 adapter->rx_scrq[i]->handle = rx_handle_array[i];
5259
5260 adapter->num_active_tx_scrqs = num_tx_pools;
5261 adapter->num_active_rx_scrqs = num_rx_pools;
5262 release_login_rsp_buffer(adapter);
5263 release_login_buffer(adapter);
5264 complete(&adapter->init_done);
5265
5266 return 0;
5267 }
5268
5269 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
5270 struct ibmvnic_adapter *adapter)
5271 {
5272 struct device *dev = &adapter->vdev->dev;
5273 long rc;
5274
5275 rc = crq->request_unmap_rsp.rc.code;
5276 if (rc)
5277 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
5278 }
5279
5280 static void handle_query_map_rsp(union ibmvnic_crq *crq,
5281 struct ibmvnic_adapter *adapter)
5282 {
5283 struct net_device *netdev = adapter->netdev;
5284 struct device *dev = &adapter->vdev->dev;
5285 long rc;
5286
5287 rc = crq->query_map_rsp.rc.code;
5288 if (rc) {
5289 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5290 return;
5291 }
5292 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
5293 crq->query_map_rsp.page_size,
5294 __be32_to_cpu(crq->query_map_rsp.tot_pages),
5295 __be32_to_cpu(crq->query_map_rsp.free_pages));
5296 }
5297
5298 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
5299 struct ibmvnic_adapter *adapter)
5300 {
5301 struct net_device *netdev = adapter->netdev;
5302 struct device *dev = &adapter->vdev->dev;
5303 long rc;
5304
5305 atomic_dec(&adapter->running_cap_crqs);
5306 netdev_dbg(netdev, "Outstanding queries: %d\n",
5307 atomic_read(&adapter->running_cap_crqs));
5308 rc = crq->query_capability.rc.code;
5309 if (rc) {
5310 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
5311 goto out;
5312 }
5313
5314 switch (be16_to_cpu(crq->query_capability.capability)) {
5315 case MIN_TX_QUEUES:
5316 adapter->min_tx_queues =
5317 be64_to_cpu(crq->query_capability.number);
5318 netdev_dbg(netdev, "min_tx_queues = %lld\n",
5319 adapter->min_tx_queues);
5320 break;
5321 case MIN_RX_QUEUES:
5322 adapter->min_rx_queues =
5323 be64_to_cpu(crq->query_capability.number);
5324 netdev_dbg(netdev, "min_rx_queues = %lld\n",
5325 adapter->min_rx_queues);
5326 break;
5327 case MIN_RX_ADD_QUEUES:
5328 adapter->min_rx_add_queues =
5329 be64_to_cpu(crq->query_capability.number);
5330 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5331 adapter->min_rx_add_queues);
5332 break;
5333 case MAX_TX_QUEUES:
5334 adapter->max_tx_queues =
5335 be64_to_cpu(crq->query_capability.number);
5336 netdev_dbg(netdev, "max_tx_queues = %lld\n",
5337 adapter->max_tx_queues);
5338 break;
5339 case MAX_RX_QUEUES:
5340 adapter->max_rx_queues =
5341 be64_to_cpu(crq->query_capability.number);
5342 netdev_dbg(netdev, "max_rx_queues = %lld\n",
5343 adapter->max_rx_queues);
5344 break;
5345 case MAX_RX_ADD_QUEUES:
5346 adapter->max_rx_add_queues =
5347 be64_to_cpu(crq->query_capability.number);
5348 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5349 adapter->max_rx_add_queues);
5350 break;
5351 case MIN_TX_ENTRIES_PER_SUBCRQ:
5352 adapter->min_tx_entries_per_subcrq =
5353 be64_to_cpu(crq->query_capability.number);
5354 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5355 adapter->min_tx_entries_per_subcrq);
5356 break;
5357 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5358 adapter->min_rx_add_entries_per_subcrq =
5359 be64_to_cpu(crq->query_capability.number);
5360 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
5361 adapter->min_rx_add_entries_per_subcrq);
5362 break;
5363 case MAX_TX_ENTRIES_PER_SUBCRQ:
5364 adapter->max_tx_entries_per_subcrq =
5365 be64_to_cpu(crq->query_capability.number);
5366 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5367 adapter->max_tx_entries_per_subcrq);
5368 break;
5369 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5370 adapter->max_rx_add_entries_per_subcrq =
5371 be64_to_cpu(crq->query_capability.number);
5372 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
5373 adapter->max_rx_add_entries_per_subcrq);
5374 break;
5375 case TCP_IP_OFFLOAD:
5376 adapter->tcp_ip_offload =
5377 be64_to_cpu(crq->query_capability.number);
5378 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
5379 adapter->tcp_ip_offload);
5380 break;
5381 case PROMISC_SUPPORTED:
5382 adapter->promisc_supported =
5383 be64_to_cpu(crq->query_capability.number);
5384 netdev_dbg(netdev, "promisc_supported = %lld\n",
5385 adapter->promisc_supported);
5386 break;
5387 case MIN_MTU:
5388 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
5389 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5390 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5391 break;
5392 case MAX_MTU:
5393 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
5394 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5395 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5396 break;
5397 case MAX_MULTICAST_FILTERS:
5398 adapter->max_multicast_filters =
5399 be64_to_cpu(crq->query_capability.number);
5400 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
5401 adapter->max_multicast_filters);
5402 break;
5403 case VLAN_HEADER_INSERTION:
5404 adapter->vlan_header_insertion =
5405 be64_to_cpu(crq->query_capability.number);
5406 if (adapter->vlan_header_insertion)
5407 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
5408 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
5409 adapter->vlan_header_insertion);
5410 break;
5411 case RX_VLAN_HEADER_INSERTION:
5412 adapter->rx_vlan_header_insertion =
5413 be64_to_cpu(crq->query_capability.number);
5414 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
5415 adapter->rx_vlan_header_insertion);
5416 break;
5417 case MAX_TX_SG_ENTRIES:
5418 adapter->max_tx_sg_entries =
5419 be64_to_cpu(crq->query_capability.number);
5420 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
5421 adapter->max_tx_sg_entries);
5422 break;
5423 case RX_SG_SUPPORTED:
5424 adapter->rx_sg_supported =
5425 be64_to_cpu(crq->query_capability.number);
5426 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
5427 adapter->rx_sg_supported);
5428 break;
5429 case OPT_TX_COMP_SUB_QUEUES:
5430 adapter->opt_tx_comp_sub_queues =
5431 be64_to_cpu(crq->query_capability.number);
5432 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
5433 adapter->opt_tx_comp_sub_queues);
5434 break;
5435 case OPT_RX_COMP_QUEUES:
5436 adapter->opt_rx_comp_queues =
5437 be64_to_cpu(crq->query_capability.number);
5438 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
5439 adapter->opt_rx_comp_queues);
5440 break;
5441 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
5442 adapter->opt_rx_bufadd_q_per_rx_comp_q =
5443 be64_to_cpu(crq->query_capability.number);
5444 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
5445 adapter->opt_rx_bufadd_q_per_rx_comp_q);
5446 break;
5447 case OPT_TX_ENTRIES_PER_SUBCRQ:
5448 adapter->opt_tx_entries_per_subcrq =
5449 be64_to_cpu(crq->query_capability.number);
5450 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5451 adapter->opt_tx_entries_per_subcrq);
5452 break;
5453 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5454 adapter->opt_rxba_entries_per_subcrq =
5455 be64_to_cpu(crq->query_capability.number);
5456 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5457 adapter->opt_rxba_entries_per_subcrq);
5458 break;
5459 case TX_RX_DESC_REQ:
5460 adapter->tx_rx_desc_req = crq->query_capability.number;
5461 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5462 adapter->tx_rx_desc_req);
5463 break;
5464
5465 default:
5466 netdev_err(netdev, "Got invalid cap rsp %d\n",
5467 crq->query_capability.capability);
5468 }
5469
5470 out:
5471 if (atomic_read(&adapter->running_cap_crqs) == 0)
5472 send_request_cap(adapter, 0);
5473 }
5474
5475 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5476 {
5477 union ibmvnic_crq crq;
5478 int rc;
5479
5480 memset(&crq, 0, sizeof(crq));
5481 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5482 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
5483
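/* fw_lock serializes the commands that share the single fw_done completion */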
5484 mutex_lock(&adapter->fw_lock);
5485 adapter->fw_done_rc = 0;
5486 reinit_completion(&adapter->fw_done);
5487
5488 rc = ibmvnic_send_crq(adapter, &crq);
5489 if (rc) {
5490 mutex_unlock(&adapter->fw_lock);
5491 return rc;
5492 }
5493
5494 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
5495 if (rc) {
5496 mutex_unlock(&adapter->fw_lock);
5497 return rc;
5498 }
5499
5500 mutex_unlock(&adapter->fw_lock);
5501 return adapter->fw_done_rc ? -EIO : 0;
5502 }
5503
5504 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5505 struct ibmvnic_adapter *adapter)
5506 {
5507 struct net_device *netdev = adapter->netdev;
5508 int rc;
5509 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
5510
5511 rc = crq->query_phys_parms_rsp.rc.code;
5512 if (rc) {
5513 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5514 return rc;
5515 }
5516 switch (rspeed) {
5517 case IBMVNIC_10MBPS:
5518 adapter->speed = SPEED_10;
5519 break;
5520 case IBMVNIC_100MBPS:
5521 adapter->speed = SPEED_100;
5522 break;
5523 case IBMVNIC_1GBPS:
5524 adapter->speed = SPEED_1000;
5525 break;
5526 case IBMVNIC_10GBPS:
5527 adapter->speed = SPEED_10000;
5528 break;
5529 case IBMVNIC_25GBPS:
5530 adapter->speed = SPEED_25000;
5531 break;
5532 case IBMVNIC_40GBPS:
5533 adapter->speed = SPEED_40000;
5534 break;
5535 case IBMVNIC_50GBPS:
5536 adapter->speed = SPEED_50000;
5537 break;
5538 case IBMVNIC_100GBPS:
5539 adapter->speed = SPEED_100000;
5540 break;
5541 case IBMVNIC_200GBPS:
5542 adapter->speed = SPEED_200000;
5543 break;
5544 default:
5545 if (netif_carrier_ok(netdev))
5546 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
5547 adapter->speed = SPEED_UNKNOWN;
5548 }
5549 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5550 adapter->duplex = DUPLEX_FULL;
5551 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5552 adapter->duplex = DUPLEX_HALF;
5553 else
5554 adapter->duplex = DUPLEX_UNKNOWN;
5555
5556 return rc;
5557 }
5558
5559 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5560 struct ibmvnic_adapter *adapter)
5561 {
5562 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5563 struct net_device *netdev = adapter->netdev;
5564 struct device *dev = &adapter->vdev->dev;
5565 u64 *u64_crq = (u64 *)crq;
5566 long rc;
5567
5568 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
5569 (unsigned long)cpu_to_be64(u64_crq[0]),
5570 (unsigned long)cpu_to_be64(u64_crq[1]));
5571 switch (gen_crq->first) {
5572 case IBMVNIC_CRQ_INIT_RSP:
5573 switch (gen_crq->cmd) {
5574 case IBMVNIC_CRQ_INIT:
5575 dev_info(dev, "Partner initialized\n");
5576 adapter->from_passive_init = true;
5577 /* Discard any stale login response that may be left over
5578  * from a previous reset of this adapter.
5579  */
5580 adapter->login_pending = false;
5581
5582 if (adapter->state == VNIC_DOWN)
5583 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5584 else
5585 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5586
5587 if (rc && rc != -EBUSY) {
5588 /* The failover reset could not be scheduled, either
5589  * because the adapter is still probing (for example
5590  * during kexec) or because memory could not be
5591  * allocated.  Clear the failover_pending flag here,
5592  * since nobody else will.  -EBUSY is ignored because
5593  * it means either a FAILOVER reset is already
5594  * scheduled or the adapter is being removed; in both
5595  * cases the flag is handled elsewhere.
5596  */
5597 netdev_err(netdev,
5598 "Error %ld scheduling failover reset\n",
5599 rc);
5600 adapter->failover_pending = false;
5601 }
5602
5603 if (!completion_done(&adapter->init_done)) {
5604 if (!adapter->init_done_rc)
5605 adapter->init_done_rc = -EAGAIN;
5606 complete(&adapter->init_done);
5607 }
5608
5609 break;
5610 case IBMVNIC_CRQ_INIT_COMPLETE:
5611 dev_info(dev, "Partner initialization complete\n");
5612 adapter->crq.active = true;
5613 send_version_xchg(adapter);
5614 break;
5615 default:
5616 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5617 }
5618 return;
5619 case IBMVNIC_CRQ_XPORT_EVENT:
5620 netif_carrier_off(netdev);
5621 adapter->crq.active = false;
5622 /* Terminate any thread that is still waiting for a
5623  * response from the device.
5624  */
5625 if (!completion_done(&adapter->fw_done)) {
5626 adapter->fw_done_rc = -EIO;
5627 complete(&adapter->fw_done);
5628 }
5629
5630 /* if we got here during crq-init, retry crq-init */
5631 if (!completion_done(&adapter->init_done)) {
5632 adapter->init_done_rc = -EAGAIN;
5633 complete(&adapter->init_done);
5634 }
5635
5636 if (!completion_done(&adapter->stats_done))
5637 complete(&adapter->stats_done);
5638 if (test_bit(0, &adapter->resetting))
5639 adapter->force_reset_recovery = true;
5640 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
5641 dev_info(dev, "Migrated, re-enabling adapter\n");
5642 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
5643 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5644 dev_info(dev, "Backing device failover detected\n");
5645 adapter->failover_pending = true;
5646 } else {
5647 /* The adapter lost the connection */
5648 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
5649 gen_crq->cmd);
5650 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5651 }
5652 return;
5653 case IBMVNIC_CRQ_CMD_RSP:
5654 break;
5655 default:
5656 dev_err(dev, "Got an invalid msg type 0x%02x\n",
5657 gen_crq->first);
5658 return;
5659 }
5660
5661 switch (gen_crq->cmd) {
5662 case VERSION_EXCHANGE_RSP:
5663 rc = crq->version_exchange_rsp.rc.code;
5664 if (rc) {
5665 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
5666 break;
5667 }
5668 ibmvnic_version =
5669 be16_to_cpu(crq->version_exchange_rsp.version);
5670 dev_info(dev, "Partner protocol version is %d\n",
5671 ibmvnic_version);
5672 send_query_cap(adapter);
5673 break;
5674 case QUERY_CAPABILITY_RSP:
5675 handle_query_cap_rsp(crq, adapter);
5676 break;
5677 case QUERY_MAP_RSP:
5678 handle_query_map_rsp(crq, adapter);
5679 break;
5680 case REQUEST_MAP_RSP:
5681 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5682 complete(&adapter->fw_done);
5683 break;
5684 case REQUEST_UNMAP_RSP:
5685 handle_request_unmap_rsp(crq, adapter);
5686 break;
5687 case REQUEST_CAPABILITY_RSP:
5688 handle_request_cap_rsp(crq, adapter);
5689 break;
5690 case LOGIN_RSP:
5691 netdev_dbg(netdev, "Got Login Response\n");
5692 handle_login_rsp(crq, adapter);
5693 break;
5694 case LOGICAL_LINK_STATE_RSP:
5695 netdev_dbg(netdev,
5696 "Got Logical Link State Response, state: %d rc: %d\n",
5697 crq->logical_link_state_rsp.link_state,
5698 crq->logical_link_state_rsp.rc.code);
5699 adapter->logical_link_state =
5700 crq->logical_link_state_rsp.link_state;
5701 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5702 complete(&adapter->init_done);
5703 break;
5704 case LINK_STATE_INDICATION:
5705 netdev_dbg(netdev, "Got Logical Link State Indication\n");
5706 adapter->phys_link_state =
5707 crq->link_state_indication.phys_link_state;
5708 adapter->logical_link_state =
5709 crq->link_state_indication.logical_link_state;
5710 if (adapter->phys_link_state && adapter->logical_link_state)
5711 netif_carrier_on(netdev);
5712 else
5713 netif_carrier_off(netdev);
5714 break;
5715 case CHANGE_MAC_ADDR_RSP:
5716 netdev_dbg(netdev, "Got MAC address change Response\n");
5717 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
5718 break;
5719 case ERROR_INDICATION:
5720 netdev_dbg(netdev, "Got Error Indication\n");
5721 handle_error_indication(crq, adapter);
5722 break;
5723 case REQUEST_STATISTICS_RSP:
5724 netdev_dbg(netdev, "Got Statistics Response\n");
5725 complete(&adapter->stats_done);
5726 break;
5727 case QUERY_IP_OFFLOAD_RSP:
5728 netdev_dbg(netdev, "Got Query IP offload Response\n");
5729 handle_query_ip_offload_rsp(adapter);
5730 break;
5731 case MULTICAST_CTRL_RSP:
5732 netdev_dbg(netdev, "Got multicast control Response\n");
5733 break;
5734 case CONTROL_IP_OFFLOAD_RSP:
5735 netdev_dbg(netdev, "Got Control IP offload Response\n");
5736 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
5737 sizeof(adapter->ip_offload_ctrl),
5738 DMA_TO_DEVICE);
5739 complete(&adapter->init_done);
5740 break;
5741 case COLLECT_FW_TRACE_RSP:
5742 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
5743 complete(&adapter->fw_done);
5744 break;
5745 case GET_VPD_SIZE_RSP:
5746 handle_vpd_size_rsp(crq, adapter);
5747 break;
5748 case GET_VPD_RSP:
5749 handle_vpd_rsp(crq, adapter);
5750 break;
5751 case QUERY_PHYS_PARMS_RSP:
5752 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
5753 complete(&adapter->fw_done);
5754 break;
5755 default:
5756 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
5757 gen_crq->cmd);
5758 }
5759 }
5760
5761 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
5762 {
5763 struct ibmvnic_adapter *adapter = instance;
5764
5765 tasklet_schedule(&adapter->tasklet);
5766 return IRQ_HANDLED;
5767 }
5768
5769 static void ibmvnic_tasklet(struct tasklet_struct *t)
5770 {
5771 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
5772 struct ibmvnic_crq_queue *queue = &adapter->crq;
5773 union ibmvnic_crq *crq;
5774 unsigned long flags;
5775
5776 spin_lock_irqsave(&queue->lock, flags);
5777
5778 /* Pull all the valid messages off the CRQ */
5779 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
5780 /* This barrier makes sure ibmvnic_next_crq()'s read of
5781  * crq->generic.first (the valid bit) is ordered before
5782  * ibmvnic_handle_crq()'s reads of the remaining fields
5783  * of the CRQ entry.
5784  */
5785 dma_rmb();
5786 ibmvnic_handle_crq(crq, adapter);
5787 crq->generic.first = 0;
5788 }
5789
5790 spin_unlock_irqrestore(&queue->lock, flags);
5791 }
5792
5793 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
5794 {
5795 struct vio_dev *vdev = adapter->vdev;
5796 int rc;
5797
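/* Retry while the hypervisor reports the enable operation as busy or still in progress */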
5798 do {
5799 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
5800 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
5801
5802 if (rc)
5803 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
5804
5805 return rc;
5806 }
5807
5808 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
5809 {
5810 struct ibmvnic_crq_queue *crq = &adapter->crq;
5811 struct device *dev = &adapter->vdev->dev;
5812 struct vio_dev *vdev = adapter->vdev;
5813 int rc;
5814
5815 /* Close the CRQ */
5816 do {
5817 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5818 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5819
5820 /* Clean out the queue */
5821 if (!crq->msgs)
5822 return -EINVAL;
5823
5824 memset(crq->msgs, 0, PAGE_SIZE);
5825 crq->cur = 0;
5826 crq->active = false;
5827
5828 /* And re-open it again */
5829 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5830 crq->msg_token, PAGE_SIZE);
5831
5832 if (rc == H_CLOSED)
5833 /* The adapter is good, but the other end is not ready */
5834 dev_warn(dev, "Partner adapter not ready\n");
5835 else if (rc != 0)
5836 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
5837
5838 return rc;
5839 }
5840
5841 static void release_crq_queue(struct ibmvnic_adapter *adapter)
5842 {
5843 struct ibmvnic_crq_queue *crq = &adapter->crq;
5844 struct vio_dev *vdev = adapter->vdev;
5845 long rc;
5846
5847 if (!crq->msgs)
5848 return;
5849
5850 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5851 free_irq(vdev->irq, adapter);
5852 tasklet_kill(&adapter->tasklet);
5853 do {
5854 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5855 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5856
5857 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5858 DMA_BIDIRECTIONAL);
5859 free_page((unsigned long)crq->msgs);
5860 crq->msgs = NULL;
5861 crq->active = false;
5862 }
5863
5864 static int init_crq_queue(struct ibmvnic_adapter *adapter)
5865 {
5866 struct ibmvnic_crq_queue *crq = &adapter->crq;
5867 struct device *dev = &adapter->vdev->dev;
5868 struct vio_dev *vdev = adapter->vdev;
5869 int rc, retrc = -ENOMEM;
5870
5871 if (crq->msgs)
5872 return 0;
5873
5874 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5875 /* the whole CRQ fits in a single zeroed page */
5876
5877 if (!crq->msgs)
5878 return -ENOMEM;
5879
5880 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5881 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5882 DMA_BIDIRECTIONAL);
5883 if (dma_mapping_error(dev, crq->msg_token))
5884 goto map_failed;
5885
5886 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5887 crq->msg_token, PAGE_SIZE);
5888
5889 if (rc == H_RESOURCE)
5890 /* the resource may still be busy, e.g. across a kexec; try a CRQ reset */
5891 rc = ibmvnic_reset_crq(adapter);
5892 retrc = rc;
5893
5894 if (rc == H_CLOSED) {
5895 dev_warn(dev, "Partner adapter not ready\n");
5896 } else if (rc) {
5897 dev_warn(dev, "Error %d opening adapter\n", rc);
5898 goto reg_crq_failed;
5899 }
5900
5901 retrc = 0;
5902
5903 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
5904
5905 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
5906 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5907 adapter->vdev->unit_address);
5908 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
5909 if (rc) {
5910 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5911 vdev->irq, rc);
5912 goto req_irq_failed;
5913 }
5914
5915 rc = vio_enable_interrupts(vdev);
5916 if (rc) {
5917 dev_err(dev, "Error %d enabling interrupts\n", rc);
5918 goto req_irq_failed;
5919 }
5920
5921 crq->cur = 0;
5922 spin_lock_init(&crq->lock);
5923
5924 /* process any CRQs that were queued before we enabled interrupts */
5925 tasklet_schedule(&adapter->tasklet);
5926
5927 return retrc;
5928
5929 req_irq_failed:
5930 tasklet_kill(&adapter->tasklet);
5931 do {
5932 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5933 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5934 reg_crq_failed:
5935 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5936 map_failed:
5937 free_page((unsigned long)crq->msgs);
5938 crq->msgs = NULL;
5939 return retrc;
5940 }
5941
5942 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
5943 {
5944 struct device *dev = &adapter->vdev->dev;
5945 unsigned long timeout = msecs_to_jiffies(20000);
5946 u64 old_num_rx_queues = adapter->req_rx_queues;
5947 u64 old_num_tx_queues = adapter->req_tx_queues;
5948 int rc;
5949
5950 adapter->from_passive_init = false;
5951
5952 rc = ibmvnic_send_crq_init(adapter);
5953 if (rc) {
5954 dev_err(dev, "Send crq init failed with error %d\n", rc);
5955 return rc;
5956 }
5957
5958 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5959 dev_err(dev, "Initialization sequence timed out\n");
5960 return -ETIMEDOUT;
5961 }
5962
5963 if (adapter->init_done_rc) {
5964 release_crq_queue(adapter);
5965 dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
5966 return adapter->init_done_rc;
5967 }
5968
5969 if (adapter->from_passive_init) {
5970 adapter->state = VNIC_OPEN;
5971 adapter->from_passive_init = false;
5972 dev_err(dev, "CRQ-init failed, passive-init\n");
5973 return -EINVAL;
5974 }
5975
5976 if (reset &&
5977 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
5978 adapter->reset_reason != VNIC_RESET_MOBILITY) {
5979 if (adapter->req_rx_queues != old_num_rx_queues ||
5980 adapter->req_tx_queues != old_num_tx_queues) {
5981 release_sub_crqs(adapter, 0);
5982 rc = init_sub_crqs(adapter);
5983 } else {
5984 /* The requested queue counts did not change, so there is
5985  * no need to release and re-allocate the sub-CRQs.  Clean
5986  * up the transmit pools first so that no stale buffers
5987  * are left behind, then reset the existing sub-CRQ
5988  * queues and reuse them.
5989  */
5990
5991 clean_tx_pools(adapter);
5992
5993 rc = reset_sub_crq_queues(adapter);
5994 }
5995 } else {
5996 rc = init_sub_crqs(adapter);
5997 }
5998
5999 if (rc) {
6000 dev_err(dev, "Initialization of sub crqs failed\n");
6001 release_crq_queue(adapter);
6002 return rc;
6003 }
6004
6005 rc = init_sub_crq_irqs(adapter);
6006 if (rc) {
6007 dev_err(dev, "Failed to initialize sub crq irqs\n");
6008 release_crq_queue(adapter);
6009 }
6010
6011 return rc;
6012 }
6013
6014 static struct device_attribute dev_attr_failover;
6015
6016 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
6017 {
6018 struct ibmvnic_adapter *adapter;
6019 struct net_device *netdev;
6020 unsigned char *mac_addr_p;
6021 unsigned long flags;
6022 bool init_success;
6023 int rc;
6024
6025 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
6026 dev->unit_address);
6027
6028 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
6029 VETH_MAC_ADDR, NULL);
6030 if (!mac_addr_p) {
6031 dev_err(&dev->dev,
6032 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
6033 __FILE__, __LINE__);
6034 return 0;
6035 }
6036
6037 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
6038 IBMVNIC_MAX_QUEUES);
6039 if (!netdev)
6040 return -ENOMEM;
6041
6042 adapter = netdev_priv(netdev);
6043 adapter->state = VNIC_PROBING;
6044 dev_set_drvdata(&dev->dev, netdev);
6045 adapter->vdev = dev;
6046 adapter->netdev = netdev;
6047 adapter->login_pending = false;
6048 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
6049 /* map_ids start at 1, so mark map_id 0 as permanently in use */
6050 bitmap_set(adapter->map_ids, 0, 1);
6051
6052 ether_addr_copy(adapter->mac_addr, mac_addr_p);
6053 eth_hw_addr_set(netdev, adapter->mac_addr);
6054 netdev->irq = dev->irq;
6055 netdev->netdev_ops = &ibmvnic_netdev_ops;
6056 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
6057 SET_NETDEV_DEV(netdev, &dev->dev);
6058
6059 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
6060 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
6061 __ibmvnic_delayed_reset);
6062 INIT_LIST_HEAD(&adapter->rwi_list);
6063 spin_lock_init(&adapter->rwi_lock);
6064 spin_lock_init(&adapter->state_lock);
6065 mutex_init(&adapter->fw_lock);
6066 init_completion(&adapter->probe_done);
6067 init_completion(&adapter->init_done);
6068 init_completion(&adapter->fw_done);
6069 init_completion(&adapter->reset_done);
6070 init_completion(&adapter->stats_done);
6071 clear_bit(0, &adapter->resetting);
6072 adapter->prev_rx_buf_sz = 0;
6073 adapter->prev_mtu = 0;
6074
6075 init_success = false;
6076 do {
6077 reinit_init_done(adapter);
6078
6079 /* Clear any failover that was noted on a previous pass,
6080  * since the CRQ is about to be reinitialized from scratch.
6081  */
6082 adapter->failover_pending = false;
6083
6084 /* If the CRQ was already initialized on an earlier pass,
6085  * one or more resets may already be queued.  Discard them
6086  * and release the CRQ before initializing it again.
6087  */
6088 release_crq_queue(adapter);
6089
6090 /* While the adapter is still in the PROBING state,
6091  * __ibmvnic_reset() will not touch the rwi_list, and with
6092  * the CRQ released above no new transport events can
6093  * arrive.  An ibmvnic_reset() call may still be in
6094  * flight, however, so take rwi_lock while flushing the
6095  * queue.  If that call wins the race and queues a reset
6096  * after the flush, it is harmless: it just leaves one
6097  * extra reset request to be processed, the same as having
6098  * two or more resets queued at once.
6099  */
6100 spin_lock_irqsave(&adapter->rwi_lock, flags);
6101 flush_reset_queue(adapter);
6102 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
6103
6104 rc = init_crq_queue(adapter);
6105 if (rc) {
6106 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
6107 rc);
6108 goto ibmvnic_init_fail;
6109 }
6110
6111 rc = ibmvnic_reset_init(adapter, false);
6112 } while (rc == -EAGAIN);
6113
6114 /* Any error from ibmvnic_reset_init() is ignored here on the
6115  * assumption that the partner is simply not ready yet and the
6116  * CRQ is not active.  When the partner becomes ready, the
6117  * passive-init reset will bring the adapter up.
6118  */
6119 if (!rc)
6120 init_success = true;
6121
6122 rc = init_stats_buffers(adapter);
6123 if (rc)
6124 goto ibmvnic_init_fail;
6125
6126 rc = init_stats_token(adapter);
6127 if (rc)
6128 goto ibmvnic_stats_fail;
6129
6130 rc = device_create_file(&dev->dev, &dev_attr_failover);
6131 if (rc)
6132 goto ibmvnic_dev_file_err;
6133
6134 netif_carrier_off(netdev);
6135
6136 if (init_success) {
6137 adapter->state = VNIC_PROBED;
6138 netdev->mtu = adapter->req_mtu - ETH_HLEN;
6139 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
6140 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
6141 } else {
6142 adapter->state = VNIC_DOWN;
6143 }
6144
6145 adapter->wait_for_reset = false;
6146 adapter->last_reset_time = jiffies;
6147
6148 rc = register_netdev(netdev);
6149 if (rc) {
6150 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
6151 goto ibmvnic_register_fail;
6152 }
6153 dev_info(&dev->dev, "ibmvnic registered\n");
6154
6155 complete(&adapter->probe_done);
6156
6157 return 0;
6158
6159 ibmvnic_register_fail:
6160 device_remove_file(&dev->dev, &dev_attr_failover);
6161
6162 ibmvnic_dev_file_err:
6163 release_stats_token(adapter);
6164
6165 ibmvnic_stats_fail:
6166 release_stats_buffers(adapter);
6167
6168 ibmvnic_init_fail:
6169 release_sub_crqs(adapter, 1);
6170 release_crq_queue(adapter);
6171
6172 /* Clean up the reset work only after the CRQ has been
6173  * released, so that no new transport events (and thus no
6174  * new work items) can arrive for the worker. */
6175 adapter->state = VNIC_REMOVING;
6176 complete(&adapter->probe_done);
6177 flush_work(&adapter->ibmvnic_reset);
6178 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6179
6180 flush_reset_queue(adapter);
6181
6182 mutex_destroy(&adapter->fw_lock);
6183 free_netdev(netdev);
6184
6185 return rc;
6186 }
6187
6188 static void ibmvnic_remove(struct vio_dev *dev)
6189 {
6190 struct net_device *netdev = dev_get_drvdata(&dev->dev);
6191 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6192 unsigned long flags;
6193
6194 spin_lock_irqsave(&adapter->state_lock, flags);
6195
6196 /* If ibmvnic_reset() is in the middle of scheduling a reset,
6197  * wait for it to finish, then set the state to REMOVING so
6198  * that no further work is scheduled and the reset paths
6199  * ignore anything already queued.  Drop the locks after the
6200  * state is set so that __ibmvnic_reset(), invoked from the
6201  * flush_work() below, can make progress.
6202  */
6203 spin_lock(&adapter->rwi_lock);
6204 adapter->state = VNIC_REMOVING;
6205 spin_unlock(&adapter->rwi_lock);
6206
6207 spin_unlock_irqrestore(&adapter->state_lock, flags);
6208
6209 flush_work(&adapter->ibmvnic_reset);
6210 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6211
6212 rtnl_lock();
6213 unregister_netdevice(netdev);
6214
6215 release_resources(adapter);
6216 release_rx_pools(adapter);
6217 release_tx_pools(adapter);
6218 release_sub_crqs(adapter, 1);
6219 release_crq_queue(adapter);
6220
6221 release_stats_token(adapter);
6222 release_stats_buffers(adapter);
6223
6224 adapter->state = VNIC_REMOVED;
6225
6226 rtnl_unlock();
6227 mutex_destroy(&adapter->fw_lock);
6228 device_remove_file(&dev->dev, &dev_attr_failover);
6229 free_netdev(netdev);
6230 dev_set_drvdata(&dev->dev, NULL);
6231 }
6232
6233 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
6234 const char *buf, size_t count)
6235 {
6236 struct net_device *netdev = dev_get_drvdata(dev);
6237 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6238 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
6239 __be64 session_token;
6240 long rc;
6241
6242 if (!sysfs_streq(buf, "1"))
6243 return -EINVAL;
6244
6245 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
6246 H_GET_SESSION_TOKEN, 0, 0, 0);
6247 if (rc) {
6248 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
6249 rc);
6250 goto last_resort;
6251 }
6252
6253 session_token = (__be64)retbuf[0];
6254 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
6255 be64_to_cpu(session_token));
6256 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
6257 H_SESSION_ERR_DETECTED, session_token, 0, 0);
6258 if (rc) {
6259 netdev_err(netdev,
6260 "H_VIOCTL initiated failover failed, rc %ld\n",
6261 rc);
6262 goto last_resort;
6263 }
6264
6265 return count;
6266
6267 last_resort:
6268 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
6269 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
6270
6271 return count;
6272 }
6273 static DEVICE_ATTR_WO(failover);
6274
6275 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
6276 {
6277 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
6278 struct ibmvnic_adapter *adapter;
6279 struct iommu_table *tbl;
6280 unsigned long ret = 0;
6281 int i;
6282
6283 tbl = get_iommu_table_base(&vdev->dev);
6284
6285 /* before probe completes there is no netdev, so return the default IO entitlement */
6286 if (!netdev)
6287 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
6288
6289 adapter = netdev_priv(netdev);
6290
6291 ret += PAGE_SIZE; /* the CRQ message queue */
6292 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
6293
6294 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
6295 ret += 4 * PAGE_SIZE; /* the sub-CRQ message queues */
6296
6297 for (i = 0; i < adapter->num_active_rx_pools; i++)
6298 ret += adapter->rx_pool[i].size *
6299 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
6300
6301 return ret;
6302 }
6303
6304 static int ibmvnic_resume(struct device *dev)
6305 {
6306 struct net_device *netdev = dev_get_drvdata(dev);
6307 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6308
6309 if (adapter->state != VNIC_OPEN)
6310 return 0;
6311
6312 tasklet_schedule(&adapter->tasklet);
6313
6314 return 0;
6315 }
6316
6317 static const struct vio_device_id ibmvnic_device_table[] = {
6318 {"network", "IBM,vnic"},
6319 {"", "" }
6320 };
6321 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
6322
6323 static const struct dev_pm_ops ibmvnic_pm_ops = {
6324 .resume = ibmvnic_resume
6325 };
6326
6327 static struct vio_driver ibmvnic_driver = {
6328 .id_table = ibmvnic_device_table,
6329 .probe = ibmvnic_probe,
6330 .remove = ibmvnic_remove,
6331 .get_desired_dma = ibmvnic_get_desired_dma,
6332 .name = ibmvnic_driver_name,
6333 .pm = &ibmvnic_pm_ops,
6334 };
6335
6336
6337 static int __init ibmvnic_module_init(void)
6338 {
6339 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
6340 IBMVNIC_DRIVER_VERSION);
6341
6342 return vio_register_driver(&ibmvnic_driver);
6343 }
6344
6345 static void __exit ibmvnic_module_exit(void)
6346 {
6347 vio_unregister_driver(&ibmvnic_driver);
6348 }
6349
6350 module_init(ibmvnic_module_init);
6351 module_exit(ibmvnic_module_exit);