/*
 * QLogic qlge NIC HBA Driver
 * Linux qlge network device driver by
 *      Ron Mercer <ron.mercer@qlogic.com>
 */
0008 #include <linux/kernel.h>
0009 #include <linux/bitops.h>
0010 #include <linux/types.h>
0011 #include <linux/module.h>
0012 #include <linux/list.h>
0013 #include <linux/pci.h>
0014 #include <linux/dma-mapping.h>
0015 #include <linux/pagemap.h>
0016 #include <linux/sched.h>
0017 #include <linux/slab.h>
0018 #include <linux/dmapool.h>
0019 #include <linux/mempool.h>
0020 #include <linux/spinlock.h>
0021 #include <linux/kthread.h>
0022 #include <linux/interrupt.h>
0023 #include <linux/errno.h>
0024 #include <linux/ioport.h>
0025 #include <linux/in.h>
0026 #include <linux/ip.h>
0027 #include <linux/ipv6.h>
0028 #include <net/ipv6.h>
0029 #include <linux/tcp.h>
0030 #include <linux/udp.h>
0031 #include <linux/if_arp.h>
0032 #include <linux/if_ether.h>
0033 #include <linux/netdevice.h>
0034 #include <linux/etherdevice.h>
0035 #include <linux/ethtool.h>
0036 #include <linux/if_vlan.h>
0037 #include <linux/skbuff.h>
0038 #include <linux/delay.h>
0039 #include <linux/mm.h>
0040 #include <linux/vmalloc.h>
0041 #include <linux/prefetch.h>
0042 #include <net/ip6_checksum.h>
0043
0044 #include "qlge.h"
0045 #include "qlge_devlink.h"
0046
0047 char qlge_driver_name[] = DRV_NAME;
0048 const char qlge_driver_version[] = DRV_VERSION;
0049
0050 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
0051 MODULE_DESCRIPTION(DRV_STRING " ");
0052 MODULE_LICENSE("GPL");
0053 MODULE_VERSION(DRV_VERSION);
0054
0055 static const u32 default_msg =
0056 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
0057 NETIF_MSG_IFDOWN |
0058 NETIF_MSG_IFUP |
0059 NETIF_MSG_RX_ERR |
0060 NETIF_MSG_TX_ERR |
0061 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
0062
0063 static int debug = -1;
0064 module_param(debug, int, 0664);
0065 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
0066
0067 #define MSIX_IRQ 0
0068 #define MSI_IRQ 1
0069 #define LEG_IRQ 2
0070 static int qlge_irq_type = MSIX_IRQ;
0071 module_param(qlge_irq_type, int, 0664);
0072 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
0073
0074 static int qlge_mpi_coredump;
0075 module_param(qlge_mpi_coredump, int, 0);
0076 MODULE_PARM_DESC(qlge_mpi_coredump,
0077 "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
0078
0079 static int qlge_force_coredump;
0080 module_param(qlge_force_coredump, int, 0);
0081 MODULE_PARM_DESC(qlge_force_coredump,
0082 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
0083
0084 static const struct pci_device_id qlge_pci_tbl[] = {
0085 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
0086 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
0088 {0,}
0089 };
0090
0091 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
0092
0093 static int qlge_wol(struct qlge_adapter *);
0094 static void qlge_set_multicast_list(struct net_device *);
0095 static int qlge_adapter_down(struct qlge_adapter *);
0096 static int qlge_adapter_up(struct qlge_adapter *);
0097
/* The SEM register implements hardware semaphores that arbitrate access to
 * resources (MAC CAM, flash, ICB, routing index, XGMAC, etc.) that are
 * shared with the firmware and the device's other functions.
 */
0102 static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
0103 {
0104 u32 sem_bits = 0;
0105
0106 switch (sem_mask) {
0107 case SEM_XGMAC0_MASK:
0108 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
0109 break;
0110 case SEM_XGMAC1_MASK:
0111 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
0112 break;
0113 case SEM_ICB_MASK:
0114 sem_bits = SEM_SET << SEM_ICB_SHIFT;
0115 break;
0116 case SEM_MAC_ADDR_MASK:
0117 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
0118 break;
0119 case SEM_FLASH_MASK:
0120 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
0121 break;
0122 case SEM_PROBE_MASK:
0123 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
0124 break;
0125 case SEM_RT_IDX_MASK:
0126 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
0127 break;
0128 case SEM_PROC_REG_MASK:
0129 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
0130 break;
0131 default:
0132 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
0133 return -EINVAL;
0134 }
0135
0136 qlge_write32(qdev, SEM, sem_bits | sem_mask);
0137 return !(qlge_read32(qdev, SEM) & sem_bits);
0138 }
0139
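/* Spin on the hardware semaphore, retrying for up to 30 * 100us before
 * giving up with -ETIMEDOUT.
 */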
0140 int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
0141 {
0142 unsigned int wait_count = 30;
0143
0144 do {
0145 if (!qlge_sem_trylock(qdev, sem_mask))
0146 return 0;
0147 udelay(100);
0148 } while (--wait_count);
0149 return -ETIMEDOUT;
0150 }
0151
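/* Release the hardware semaphore; the read back flushes the posted write. */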
0152 void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
0153 {
0154 qlge_write32(qdev, SEM, sem_mask);
0155 qlge_read32(qdev, SEM);
0156 }
0157
/* Wait for a given bit in a register to come ready.  Returns -EIO if the
 * error bit is set and -ETIMEDOUT if the ready bit never comes up.  Used
 * mostly during initialization, but also from netdev callbacks.
 */
0163 int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
0164 {
0165 u32 temp;
0166 int count;
0167
0168 for (count = 0; count < UDELAY_COUNT; count++) {
0169 temp = qlge_read32(qdev, reg);
0170
0171
0172 if (temp & err_bit) {
0173 netif_alert(qdev, probe, qdev->ndev,
0174 "register 0x%.08x access error, value = 0x%.08x!.\n",
0175 reg, temp);
0176 return -EIO;
0177 } else if (temp & bit) {
0178 return 0;
0179 }
0180 udelay(UDELAY_DELAY);
0181 }
0182 netif_alert(qdev, probe, qdev->ndev,
0183 "Timed out waiting for reg %x to come ready.\n", reg);
0184 return -ETIMEDOUT;
0185 }
0186
/* The CFG register is used to download TX and RX control blocks to the
 * chip.  This function waits for a previously issued operation to finish.
 */
0190 static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
0191 {
0192 int count;
0193 u32 temp;
0194
0195 for (count = 0; count < UDELAY_COUNT; count++) {
0196 temp = qlge_read32(qdev, CFG);
0197 if (temp & CFG_LE)
0198 return -EIO;
0199 if (!(temp & bit))
0200 return 0;
0201 udelay(UDELAY_DELAY);
0202 }
0203 return -ETIMEDOUT;
0204 }
0205
/* Issue an init control block to the hardware: map the block, program its
 * address, trigger the download and wait for completion.
 */
0209 int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
0210 u16 q_id)
0211 {
0212 u64 map;
0213 int status = 0;
0214 int direction;
0215 u32 mask;
0216 u32 value;
0217
0218 if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
0219 direction = DMA_TO_DEVICE;
0220 else
0221 direction = DMA_FROM_DEVICE;
0222
0223 map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
0224 if (dma_mapping_error(&qdev->pdev->dev, map)) {
0225 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
0226 return -ENOMEM;
0227 }
0228
0229 status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
0230 if (status)
0231 goto lock_failed;
0232
0233 status = qlge_wait_cfg(qdev, bit);
0234 if (status) {
0235 netif_err(qdev, ifup, qdev->ndev,
0236 "Timed out waiting for CFG to come ready.\n");
0237 goto exit;
0238 }
0239
0240 qlge_write32(qdev, ICB_L, (u32)map);
0241 qlge_write32(qdev, ICB_H, (u32)(map >> 32));
0242
0243 mask = CFG_Q_MASK | (bit << 16);
0244 value = bit | (q_id << CFG_Q_SHIFT);
0245 qlge_write32(qdev, CFG, (mask | value));
0246
/*
 * Wait for the bit to clear after signaling hw.
 */
0250 status = qlge_wait_cfg(qdev, bit);
0251 exit:
0252 qlge_sem_unlock(qdev, SEM_ICB_MASK);
0253 lock_failed:
0254 dma_unmap_single(&qdev->pdev->dev, map, size, direction);
0255 return status;
0256 }
0257
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
0259 int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
0260 u32 *value)
0261 {
0262 u32 offset = 0;
0263 int status;
0264
0265 switch (type) {
0266 case MAC_ADDR_TYPE_MULTI_MAC:
0267 case MAC_ADDR_TYPE_CAM_MAC: {
0268 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0269 if (status)
0270 break;
0271 qlge_write32(qdev, MAC_ADDR_IDX,
0272 (offset++) |
0273 (index << MAC_ADDR_IDX_SHIFT) |
0274 MAC_ADDR_ADR | MAC_ADDR_RS |
0275 type);
0276 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
0277 if (status)
0278 break;
0279 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
0280 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0281 if (status)
0282 break;
0283 qlge_write32(qdev, MAC_ADDR_IDX,
0284 (offset++) |
0285 (index << MAC_ADDR_IDX_SHIFT) |
0286 MAC_ADDR_ADR | MAC_ADDR_RS |
0287 type);
0288 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
0289 if (status)
0290 break;
0291 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
0292 if (type == MAC_ADDR_TYPE_CAM_MAC) {
0293 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
0294 MAC_ADDR_MW, 0);
0295 if (status)
0296 break;
0297 qlge_write32(qdev, MAC_ADDR_IDX,
0298 (offset++) |
0299 (index
0300 << MAC_ADDR_IDX_SHIFT) |
0301 MAC_ADDR_ADR |
0302 MAC_ADDR_RS | type);
0303 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
0304 MAC_ADDR_MR, 0);
0305 if (status)
0306 break;
0307 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
0308 }
0309 break;
0310 }
0311 case MAC_ADDR_TYPE_VLAN:
0312 case MAC_ADDR_TYPE_MULTI_FLTR:
0313 default:
0314 netif_crit(qdev, ifup, qdev->ndev,
0315 "Address type %d not yet supported.\n", type);
0316 status = -EPERM;
0317 }
0318 return status;
0319 }
0320
/* Set up a MAC, multicast or VLAN address for inbound frame matching. */
0324 static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, const u8 *addr,
0325 u32 type, u16 index)
0326 {
0327 u32 offset = 0;
0328 int status = 0;
0329
0330 switch (type) {
0331 case MAC_ADDR_TYPE_MULTI_MAC: {
0332 u32 upper = (addr[0] << 8) | addr[1];
0333 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
0334 (addr[5]);
0335
0336 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0337 if (status)
0338 break;
0339 qlge_write32(qdev, MAC_ADDR_IDX,
0340 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
0341 MAC_ADDR_E);
0342 qlge_write32(qdev, MAC_ADDR_DATA, lower);
0343 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0344 if (status)
0345 break;
0346 qlge_write32(qdev, MAC_ADDR_IDX,
0347 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
0348 MAC_ADDR_E);
0349
0350 qlge_write32(qdev, MAC_ADDR_DATA, upper);
0351 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0352 break;
0353 }
0354 case MAC_ADDR_TYPE_CAM_MAC: {
0355 u32 cam_output;
0356 u32 upper = (addr[0] << 8) | addr[1];
0357 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
0358 (addr[5]);
0359 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0360 if (status)
0361 break;
0362 qlge_write32(qdev, MAC_ADDR_IDX,
0363 (offset++) |
0364 (index << MAC_ADDR_IDX_SHIFT) |
0365 type);
0366 qlge_write32(qdev, MAC_ADDR_DATA, lower);
0367 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0368 if (status)
0369 break;
0370 qlge_write32(qdev, MAC_ADDR_IDX,
0371 (offset++) |
0372 (index << MAC_ADDR_IDX_SHIFT) |
0373 type);
0374 qlge_write32(qdev, MAC_ADDR_DATA, upper);
0375 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0376 if (status)
0377 break;
0378 qlge_write32(qdev, MAC_ADDR_IDX,
0379 (offset) |
0380 (index << MAC_ADDR_IDX_SHIFT) |
0381 type);
/* Route the frame to the NIC core, tag it with our function number and
 * hardcode the destination completion queue id to zero.
 */
0386 cam_output = (CAM_OUT_ROUTE_NIC |
0387 (qdev->func << CAM_OUT_FUNC_SHIFT) |
0388 (0 << CAM_OUT_CQ_ID_SHIFT));
0389 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
0390 cam_output |= CAM_OUT_RV;
0391
0392 qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
0393 break;
0394 }
0395 case MAC_ADDR_TYPE_VLAN: {
0396 u32 enable_bit = *((u32 *)&addr[0]);
0397
/* For the VLAN type, addr carries a single flag rather than an address:
 * MAC_ADDR_E set enables filtering for this vlan id, clear disables it.
 */
0402 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
0403 if (status)
0404 break;
0405 qlge_write32(qdev, MAC_ADDR_IDX,
0406 offset |
0407 (index << MAC_ADDR_IDX_SHIFT) |
0408 type |
0409 enable_bit);
0410 break;
0411 }
0412 case MAC_ADDR_TYPE_MULTI_FLTR:
0413 default:
0414 netif_crit(qdev, ifup, qdev->ndev,
0415 "Address type %d not yet supported.\n", type);
0416 status = -EPERM;
0417 }
0418 return status;
0419 }
0420
/* Set or clear the port's MAC address in the CAM.  The address is
 * programmed when the link comes up and cleared when it goes down
 * (see qlge_link_on/qlge_link_off below).
 */
0425 static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
0426 {
0427 int status;
0428 char zero_mac_addr[ETH_ALEN];
0429 char *addr;
0430
0431 if (set) {
0432 addr = &qdev->current_mac_addr[0];
0433 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
0434 "Set Mac addr %pM\n", addr);
0435 } else {
0436 eth_zero_addr(zero_mac_addr);
0437 addr = &zero_mac_addr[0];
0438 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
0439 "Clearing MAC address\n");
0440 }
0441 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
0442 if (status)
0443 return status;
0444 status = qlge_set_mac_addr_reg(qdev, (const u8 *)addr,
0445 MAC_ADDR_TYPE_CAM_MAC,
0446 qdev->func * MAX_CQ);
0447 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
0448 if (status)
0449 netif_err(qdev, ifup, qdev->ndev,
0450 "Failed to init mac address.\n");
0451 return status;
0452 }
0453
0454 void qlge_link_on(struct qlge_adapter *qdev)
0455 {
0456 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
0457 netif_carrier_on(qdev->ndev);
0458 qlge_set_mac_addr(qdev, 1);
0459 }
0460
0461 void qlge_link_off(struct qlge_adapter *qdev)
0462 {
0463 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
0464 netif_carrier_off(qdev->ndev);
0465 qlge_set_mac_addr(qdev, 0);
0466 }
0467
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
0471 int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
0472 {
0473 int status = 0;
0474
0475 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
0476 if (status)
0477 goto exit;
0478
0479 qlge_write32(qdev, RT_IDX,
0480 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
0481 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
0482 if (status)
0483 goto exit;
0484 *value = qlge_read32(qdev, RT_DATA);
0485 exit:
0486 return status;
0487 }
0488
/* Each routing index routes a particular frame type to an inbound queue.
 * Broadcast/multicast/error frames go to the default queue for slow
 * handling, while CAM hit and RSS frames go to the fast-path queues.
 */
0494 static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
0495 int enable)
0496 {
0497 int status = -EINVAL;
0498 u32 value = 0;
0499
0500 switch (mask) {
0501 case RT_IDX_CAM_HIT:
0502 {
0503 value = RT_IDX_DST_CAM_Q |
0504 RT_IDX_TYPE_NICQ |
0505 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);
0506 break;
0507 }
0508 case RT_IDX_VALID:
0509 {
0510 value = RT_IDX_DST_DFLT_Q |
0511 RT_IDX_TYPE_NICQ |
0512 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);
0513 break;
0514 }
0515 case RT_IDX_ERR:
0516 {
0517 value = RT_IDX_DST_DFLT_Q |
0518 RT_IDX_TYPE_NICQ |
0519 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);
0520 break;
0521 }
0522 case RT_IDX_IP_CSUM_ERR:
0523 {
0524 value = RT_IDX_DST_DFLT_Q |
0525 RT_IDX_TYPE_NICQ |
0526 (RT_IDX_IP_CSUM_ERR_SLOT <<
0527 RT_IDX_IDX_SHIFT);
0528 break;
0529 }
0530 case RT_IDX_TU_CSUM_ERR:
0531 {
0532 value = RT_IDX_DST_DFLT_Q |
0533 RT_IDX_TYPE_NICQ |
0534 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
0535 RT_IDX_IDX_SHIFT);
0536 break;
0537 }
0538 case RT_IDX_BCAST:
0539 {
0540 value = RT_IDX_DST_DFLT_Q |
0541 RT_IDX_TYPE_NICQ |
0542 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);
0543 break;
0544 }
0545 case RT_IDX_MCAST:
0546 {
0547 value = RT_IDX_DST_DFLT_Q |
0548 RT_IDX_TYPE_NICQ |
0549 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);
0550 break;
0551 }
0552 case RT_IDX_MCAST_MATCH:
0553 {
0554 value = RT_IDX_DST_DFLT_Q |
0555 RT_IDX_TYPE_NICQ |
0556 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);
0557 break;
0558 }
0559 case RT_IDX_RSS_MATCH:
0560 {
0561 value = RT_IDX_DST_RSS |
0562 RT_IDX_TYPE_NICQ |
0563 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);
0564 break;
0565 }
0566 case 0:
0567 {
0568 value = RT_IDX_DST_DFLT_Q |
0569 RT_IDX_TYPE_NICQ |
0570 (index << RT_IDX_IDX_SHIFT);
0571 break;
0572 }
0573 default:
0574 netif_err(qdev, ifup, qdev->ndev,
0575 "Mask type %d not yet supported.\n", mask);
0576 status = -EPERM;
0577 goto exit;
0578 }
0579
0580 if (value) {
0581 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
0582 if (status)
0583 goto exit;
0584 value |= (enable ? RT_IDX_E : 0);
0585 qlge_write32(qdev, RT_IDX, value);
0586 qlge_write32(qdev, RT_DATA, enable ? mask : 0);
0587 }
0588 exit:
0589 return status;
0590 }
0591
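/* Global interrupt enable/disable.  As with other registers on this chip,
 * the upper 16 bits of INTR_EN act as a write mask for the lower bits.
 */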
0592 static void qlge_enable_interrupts(struct qlge_adapter *qdev)
0593 {
0594 qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
0595 }
0596
0597 static void qlge_disable_interrupts(struct qlge_adapter *qdev)
0598 {
0599 qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
0600 }
0601
0602 static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
0603 {
0604 struct intr_context *ctx = &qdev->intr_context[intr];
0605
0606 qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
0607 }
0608
0609 static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
0610 {
0611 struct intr_context *ctx = &qdev->intr_context[intr];
0612
0613 qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
0614 }
0615
0616 static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
0617 {
0618 int i;
0619
0620 for (i = 0; i < qdev->intr_count; i++)
0621 qlge_enable_completion_interrupt(qdev, i);
0622 }
0623
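/* Validate a flash image by checking its 4-byte signature and verifying
 * that its 16-bit words sum to zero.
 */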
0624 static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
0625 {
0626 int status, i;
0627 u16 csum = 0;
0628 __le16 *flash = (__le16 *)&qdev->flash;
0629
0630 status = strncmp((char *)&qdev->flash, str, 4);
0631 if (status) {
0632 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
0633 return status;
0634 }
0635
0636 for (i = 0; i < size; i++)
0637 csum += le16_to_cpu(*flash++);
0638
0639 if (csum)
0640 netif_err(qdev, ifup, qdev->ndev,
0641 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
0642
0643 return csum;
0644 }
0645
0646 static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
0647 {
0648 int status = 0;
0649
0650 status = qlge_wait_reg_rdy(qdev,
0651 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
0652 if (status)
0653 goto exit;
0654
0655 qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
0656
0657 status = qlge_wait_reg_rdy(qdev,
0658 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
0659 if (status)
0660 goto exit;
0661
/* The flash stores this data as an array of __le32.  qlge_read32()
 * returns cpu-endian data, so convert it back to little endian here.
 */
0665 *data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
0666 exit:
0667 return status;
0668 }
0669
0670 static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
0671 {
0672 u32 i, size;
0673 int status;
0674 __le32 *p = (__le32 *)&qdev->flash;
0675 u32 offset;
0676 u8 mac_addr[6];
0677
/* Select this function's flash region and convert the byte offset to
 * dword units for the flash read routine.
 */
0681 if (!qdev->port)
0682 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
0683 else
0684 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
0685
0686 if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
0687 return -ETIMEDOUT;
0688
0689 size = sizeof(struct flash_params_8000) / sizeof(u32);
0690 for (i = 0; i < size; i++, p++) {
0691 status = qlge_read_flash_word(qdev, i + offset, p);
0692 if (status) {
0693 netif_err(qdev, ifup, qdev->ndev,
0694 "Error reading flash.\n");
0695 goto exit;
0696 }
0697 }
0698
0699 status = qlge_validate_flash(qdev,
0700 sizeof(struct flash_params_8000) /
0701 sizeof(u16),
0702 "8000");
0703 if (status) {
0704 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
0705 status = -EINVAL;
0706 goto exit;
0707 }
0708
/* Pick between the two MAC addresses stored in flash based on the
 * data_type1 field.
 */
0712 if (qdev->flash.flash_params_8000.data_type1 == 2)
0713 memcpy(mac_addr,
0714 qdev->flash.flash_params_8000.mac_addr1,
0715 qdev->ndev->addr_len);
0716 else
0717 memcpy(mac_addr,
0718 qdev->flash.flash_params_8000.mac_addr,
0719 qdev->ndev->addr_len);
0720
0721 if (!is_valid_ether_addr(mac_addr)) {
0722 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
0723 status = -EINVAL;
0724 goto exit;
0725 }
0726
0727 eth_hw_addr_set(qdev->ndev, mac_addr);
0728
0729 exit:
0730 qlge_sem_unlock(qdev, SEM_FLASH_MASK);
0731 return status;
0732 }
0733
0734 static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
0735 {
0736 int i;
0737 int status;
0738 __le32 *p = (__le32 *)&qdev->flash;
0739 u32 offset = 0;
0740 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
0741
/* The second function's flash parameters immediately follow the first
 * function's.
 */
0745 if (qdev->port)
0746 offset = size;
0747
0748 if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
0749 return -ETIMEDOUT;
0750
0751 for (i = 0; i < size; i++, p++) {
0752 status = qlge_read_flash_word(qdev, i + offset, p);
0753 if (status) {
0754 netif_err(qdev, ifup, qdev->ndev,
0755 "Error reading flash.\n");
0756 goto exit;
0757 }
0758 }
0759
0760 status = qlge_validate_flash(qdev,
0761 sizeof(struct flash_params_8012) /
0762 sizeof(u16),
0763 "8012");
0764 if (status) {
0765 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
0766 status = -EINVAL;
0767 goto exit;
0768 }
0769
0770 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
0771 status = -EINVAL;
0772 goto exit;
0773 }
0774
0775 eth_hw_addr_set(qdev->ndev, qdev->flash.flash_params_8012.mac_addr);
0776
0777 exit:
0778 qlge_sem_unlock(qdev, SEM_FLASH_MASK);
0779 return status;
0780 }
0781
/* xgmac registers are accessed indirectly through the XGMAC_ADDR and
 * XGMAC_DATA register pair.  Each access must wait for the ready bit
 * before touching the data register.
 */
0786 static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
0787 {
0788 int status;
0789
0790 status = qlge_wait_reg_rdy(qdev,
0791 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
0792 if (status)
0793 return status;
0794
0795 qlge_write32(qdev, XGMAC_DATA, data);
0796
0797 qlge_write32(qdev, XGMAC_ADDR, reg);
0798 return status;
0799 }
0800
/* Read an xgmac register via the XGMAC_ADDR/XGMAC_DATA pair, waiting for
 * the ready bit both before issuing the read and before fetching the data.
 */
0805 int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
0806 {
0807 int status = 0;
0808
0809 status = qlge_wait_reg_rdy(qdev,
0810 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
0811 if (status)
0812 goto exit;
0813
0814 qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
0815
0816 status = qlge_wait_reg_rdy(qdev,
0817 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
0818 if (status)
0819 goto exit;
0820
0821 *data = qlge_read32(qdev, XGMAC_DATA);
0822 exit:
0823 return status;
0824 }
0825
0826
0827 int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
0828 {
0829 int status = 0;
0830 u32 hi = 0;
0831 u32 lo = 0;
0832
0833 status = qlge_read_xgmac_reg(qdev, reg, &lo);
0834 if (status)
0835 goto exit;
0836
0837 status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
0838 if (status)
0839 goto exit;
0840
0841 *data = (u64)lo | ((u64)hi << 32);
0842
0843 exit:
0844 return status;
0845 }
0846
0847 static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
0848 {
0849 int status;
0850
/* Query the MPI firmware (version and state), then kick off the port
 * configuration worker.
 */
0854 status = qlge_mb_about_fw(qdev);
0855 if (status)
0856 goto exit;
0857 status = qlge_mb_get_fw_state(qdev);
0858 if (status)
0859 goto exit;
0860
0861 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
0862 exit:
0863 return status;
0864 }
0865
/* Take the MAC core out of reset, enable TX/RX statistics counting, and
 * take the transmitter and receiver out of reset.
 */
0872 static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
0873 {
0874 int status = 0;
0875 u32 data;
0876
0877 if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
/* Another function holds the xgmac semaphore, so just wait for the port
 * init bit to come ready.
 */
0881 netif_info(qdev, link, qdev->ndev,
0882 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
0883 status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
0884 if (status) {
0885 netif_crit(qdev, link, qdev->ndev,
0886 "Port initialize timed out.\n");
0887 }
0888 return status;
0889 }
0890
0891 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
0892
0893 status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
0894 if (status)
0895 goto end;
0896 data |= GLOBAL_CFG_RESET;
0897 status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
0898 if (status)
0899 goto end;
0900
/* Clear the reset bit, enable jumbo frames and TX/RX statistics. */
0902 data &= ~GLOBAL_CFG_RESET;
0903 data |= GLOBAL_CFG_JUMBO;
0904 data |= GLOBAL_CFG_TX_STAT_EN;
0905 data |= GLOBAL_CFG_RX_STAT_EN;
0906 status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
0907 if (status)
0908 goto end;
0909
0910
0911 status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
0912 if (status)
0913 goto end;
0914 data &= ~TX_CFG_RESET;
0915 data |= TX_CFG_EN;
0916 status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
0917 if (status)
0918 goto end;
0919
0920
0921 status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
0922 if (status)
0923 goto end;
0924 data &= ~RX_CFG_RESET;
0925 data |= RX_CFG_EN;
0926 status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
0927 if (status)
0928 goto end;
0929
0930
0931 status =
0932 qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
0933 if (status)
0934 goto end;
0935 status =
0936 qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
0937 if (status)
0938 goto end;
0939
0940
0941 qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
0942 end:
0943 qlge_sem_unlock(qdev, qdev->xg_sem_mask);
0944 return status;
0945 }
0946
0947 static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
0948 {
0949 return PAGE_SIZE << qdev->lbq_buf_order;
0950 }
0951
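/* Pop the next descriptor off a buffer queue and advance next_to_clean. */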
0952 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
0953 {
0954 struct qlge_bq_desc *bq_desc;
0955
0956 bq_desc = &bq->queue[bq->next_to_clean];
0957 bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
0958
0959 return bq_desc;
0960 }
0961
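/* Return the next large-buffer chunk, syncing it for CPU access and
 * unmapping the page once its last chunk has been consumed.
 */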
0962 static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
0963 struct rx_ring *rx_ring)
0964 {
0965 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
0966
0967 dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
0968 qdev->lbq_buf_size, DMA_FROM_DEVICE);
0969
0970 if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
0971 qlge_lbq_block_size(qdev)) {
0972
0973 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
0974 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
0975 }
0976
0977 return lbq_desc;
0978 }
0979
0980
0981 static void qlge_update_cq(struct rx_ring *rx_ring)
0982 {
0983 rx_ring->cnsmr_idx++;
0984 rx_ring->curr_entry++;
0985 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
0986 rx_ring->cnsmr_idx = 0;
0987 rx_ring->curr_entry = rx_ring->cq_base;
0988 }
0989 }
0990
0991 static void qlge_write_cq_idx(struct rx_ring *rx_ring)
0992 {
0993 qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
0994 }
0995
0996 static const char * const bq_type_name[] = {
0997 [QLGE_SB] = "sbq",
0998 [QLGE_LB] = "lbq",
0999 };
1000
1001
1002 static int qlge_refill_sb(struct rx_ring *rx_ring,
1003 struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1004 {
1005 struct qlge_adapter *qdev = rx_ring->qdev;
1006 struct sk_buff *skb;
1007
1008 if (sbq_desc->p.skb)
1009 return 0;
1010
1011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1012 "ring %u sbq: getting new skb for index %d.\n",
1013 rx_ring->cq_id, sbq_desc->index);
1014
1015 skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1016 if (!skb)
1017 return -ENOMEM;
1018 skb_reserve(skb, QLGE_SB_PAD);
1019
1020 sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
1021 SMALL_BUF_MAP_SIZE,
1022 DMA_FROM_DEVICE);
1023 if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
1024 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1025 dev_kfree_skb_any(skb);
1026 return -EIO;
1027 }
1028 *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1029
1030 sbq_desc->p.skb = skb;
1031 return 0;
1032 }
1033
1034
1035 static int qlge_refill_lb(struct rx_ring *rx_ring,
1036 struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1037 {
1038 struct qlge_adapter *qdev = rx_ring->qdev;
1039 struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1040
1041 if (!master_chunk->page) {
1042 struct page *page;
1043 dma_addr_t dma_addr;
1044
1045 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1046 if (unlikely(!page))
1047 return -ENOMEM;
1048 dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
1049 qlge_lbq_block_size(qdev),
1050 DMA_FROM_DEVICE);
1051 if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
1052 __free_pages(page, qdev->lbq_buf_order);
1053 netif_err(qdev, drv, qdev->ndev,
1054 "PCI mapping failed.\n");
1055 return -EIO;
1056 }
1057 master_chunk->page = page;
1058 master_chunk->va = page_address(page);
1059 master_chunk->offset = 0;
1060 rx_ring->chunk_dma_addr = dma_addr;
1061 }
1062
1063 lbq_desc->p.pg_chunk = *master_chunk;
1064 lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1065 *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1066 lbq_desc->p.pg_chunk.offset);
1067
/* Adjust the master page chunk for the next buffer get.  Each chunk that
 * is handed out keeps its own reference on the page.
 */
1071 master_chunk->offset += qdev->lbq_buf_size;
1072 if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
1073 master_chunk->page = NULL;
1074 } else {
1075 master_chunk->va += qdev->lbq_buf_size;
1076 get_page(master_chunk->page);
1077 }
1078
1079 return 0;
1080 }
1081
1082
1083 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1084 {
1085 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1086 struct qlge_adapter *qdev = rx_ring->qdev;
1087 struct qlge_bq_desc *bq_desc;
1088 int refill_count;
1089 int retval;
1090 int i;
1091
1092 refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1093 bq->next_to_use);
1094 if (!refill_count)
1095 return 0;
1096
1097 i = bq->next_to_use;
1098 bq_desc = &bq->queue[i];
1099 i -= QLGE_BQ_LEN;
1100 do {
1101 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1102 "ring %u %s: try cleaning idx %d\n",
1103 rx_ring->cq_id, bq_type_name[bq->type], i);
1104
1105 if (bq->type == QLGE_SB)
1106 retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1107 else
1108 retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1109 if (retval < 0) {
1110 netif_err(qdev, ifup, qdev->ndev,
1111 "ring %u %s: Could not get a page chunk, idx %d\n",
1112 rx_ring->cq_id, bq_type_name[bq->type], i);
1113 break;
1114 }
1115
1116 bq_desc++;
1117 i++;
1118 if (unlikely(!i)) {
1119 bq_desc = &bq->queue[0];
1120 i -= QLGE_BQ_LEN;
1121 }
1122 refill_count--;
1123 } while (refill_count);
1124 i += QLGE_BQ_LEN;
1125
1126 if (bq->next_to_use != i) {
1127 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1128 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1129 "ring %u %s: updating prod idx = %d.\n",
1130 rx_ring->cq_id, bq_type_name[bq->type],
1131 i);
1132 qlge_write_db_reg(i, bq->prod_idx_db_reg);
1133 }
1134 bq->next_to_use = i;
1135 }
1136
1137 return retval;
1138 }
1139
1140 static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1141 unsigned long delay)
1142 {
1143 bool sbq_fail, lbq_fail;
1144
1145 sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1146 lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1147
/* Minimum number of buffers needed to be able to receive at least one
 * frame of any format:
 * sbq: 1 for header + 1 for data
 * lbq: mtu 9000 / lb size
 * Below this, the queue might stall.
 */
1154 if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1155 (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1156 DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
/* Allocations can take a long time here (e.g. under memory reclaim), so
 * push the retry onto the long-running system workqueue.
 */
1161 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1162 &rx_ring->refill_work, delay);
1163 }
1164
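/* Worker used when the atomic refill in the napi path could not fill the
 * buffer queues; retry here with GFP_KERNEL and then reschedule napi.
 */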
1165 static void qlge_slow_refill(struct work_struct *work)
1166 {
1167 struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1168 refill_work.work);
1169 struct napi_struct *napi = &rx_ring->napi;
1170
1171 napi_disable(napi);
1172 qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1173 napi_enable(napi);
1174
1175 local_bh_disable();
/* Reschedule napi in case napi_disable() above discarded work that was
 * still pending.
 */
1179 napi_schedule(napi);
1180
1181 local_bh_enable();
1182 }
1183
/* Unmap the buffers that were mapped for a transmit.  Called from the send
 * path when a mapping fails part-way through, and from the tx completion
 * path when a transmit finishes.
 */
1187 static void qlge_unmap_send(struct qlge_adapter *qdev,
1188 struct tx_ring_desc *tx_ring_desc, int mapped)
1189 {
1190 int i;
1191
1192 for (i = 0; i < mapped; i++) {
1193 if (i == 0 || (i == 7 && mapped > 7)) {
/* Entry 0 is the skb header and entry 7 (when more than seven segments
 * were used) is the outbound address list (OAL); both were mapped with
 * dma_map_single().  Every other entry is a page fragment and must be
 * unmapped with dma_unmap_page().
 */
1203 if (i == 7) {
1204 netif_printk(qdev, tx_done, KERN_DEBUG,
1205 qdev->ndev,
1206 "unmapping OAL area.\n");
1207 }
1208 dma_unmap_single(&qdev->pdev->dev,
1209 dma_unmap_addr(&tx_ring_desc->map[i],
1210 mapaddr),
1211 dma_unmap_len(&tx_ring_desc->map[i],
1212 maplen),
1213 DMA_TO_DEVICE);
1214 } else {
1215 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1216 "unmapping frag %d.\n", i);
1217 dma_unmap_page(&qdev->pdev->dev,
1218 dma_unmap_addr(&tx_ring_desc->map[i],
1219 mapaddr),
1220 dma_unmap_len(&tx_ring_desc->map[i],
1221 maplen), DMA_TO_DEVICE);
1222 }
1223 }
1224 }
1225
/* Map the buffers for this transmit.  Returns NETDEV_TX_OK on success or
 * NETDEV_TX_BUSY if a mapping failed.
 */
1229 static int qlge_map_send(struct qlge_adapter *qdev,
1230 struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
1231 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1232 {
1233 int len = skb_headlen(skb);
1234 dma_addr_t map;
1235 int frag_idx, err, map_idx = 0;
1236 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1237 int frag_cnt = skb_shinfo(skb)->nr_frags;
1238
1239 if (frag_cnt) {
1240 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1241 "frag_cnt = %d.\n", frag_cnt);
1242 }
1243
1244
1245
1246 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
1247
1248 err = dma_mapping_error(&qdev->pdev->dev, map);
1249 if (err) {
1250 netif_err(qdev, tx_queued, qdev->ndev,
1251 "PCI mapping failed with error: %d\n", err);
1252
1253 return NETDEV_TX_BUSY;
1254 }
1255
1256 tbd->len = cpu_to_le32(len);
1257 tbd->addr = cpu_to_le64(map);
1258 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1259 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1260 map_idx++;
1261
/* This loop fills the remaining address descriptors in the IOCB.  If there
 * are more than seven fragments, the eighth descriptor is pointed at an
 * external outbound address list (OAL) and the rest of the fragments are
 * stored there instead.
 */
1269 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1270 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1271
1272 tbd++;
1273 if (frag_idx == 6 && frag_cnt > 7) {
/* Tack on the OAL (outbound address list).  The IOCB itself holds the skb
 * header plus the first six fragments; its last descriptor is made to
 * point at the OAL, and this fragment and all remaining ones are written
 * into the OAL entries instead.
 */
1293 map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
1294 sizeof(struct qlge_oal),
1295 DMA_TO_DEVICE);
1296 err = dma_mapping_error(&qdev->pdev->dev, map);
1297 if (err) {
1298 netif_err(qdev, tx_queued, qdev->ndev,
1299 "PCI mapping outbound address list with error: %d\n",
1300 err);
1301 goto map_error;
1302 }
1303
1304 tbd->addr = cpu_to_le64(map);
1305
/* The descriptor length is the number of fragments left to map, times the
 * size of one tx_buf_desc, with the continuation bit set.
 */
1310 tbd->len =
1311 cpu_to_le32((sizeof(struct tx_buf_desc) *
1312 (frag_cnt - frag_idx)) | TX_DESC_C);
1313 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1314 map);
1315 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1316 sizeof(struct qlge_oal));
1317 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1318 map_idx++;
1319 }
1320
1321 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1322 DMA_TO_DEVICE);
1323
1324 err = dma_mapping_error(&qdev->pdev->dev, map);
1325 if (err) {
1326 netif_err(qdev, tx_queued, qdev->ndev,
1327 "PCI mapping frags failed with error: %d.\n",
1328 err);
1329 goto map_error;
1330 }
1331
1332 tbd->addr = cpu_to_le64(map);
1333 tbd->len = cpu_to_le32(skb_frag_size(frag));
1334 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1335 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1336 skb_frag_size(frag));
1337 }
1338
1339 tx_ring_desc->map_cnt = map_idx;
1340
1341 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1342 return NETDEV_TX_OK;
1343
1344 map_error:
/* If the first mapping failed then map_idx is zero and only the skb->data
 * area gets unmapped.  Otherwise map_idx counts how many mappings
 * succeeded so qlge_unmap_send() can unwind them.
 */
1351 qlge_unmap_send(qdev, tx_ring_desc, map_idx);
1352 return NETDEV_TX_BUSY;
1353 }
1354
1355
1356 static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
1357 struct rx_ring *rx_ring)
1358 {
1359 struct nic_stats *stats = &qdev->nic_stats;
1360
1361 stats->rx_err_count++;
1362 rx_ring->rx_errors++;
1363
1364 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1365 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1366 stats->rx_code_err++;
1367 break;
1368 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1369 stats->rx_oversize_err++;
1370 break;
1371 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1372 stats->rx_undersize_err++;
1373 break;
1374 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1375 stats->rx_preamble_err++;
1376 break;
1377 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1378 stats->rx_frame_len_err++;
1379 break;
1380 case IB_MAC_IOCB_RSP_ERR_CRC:
1381 stats->rx_crc_err++;
1382 break;
1383 default:
1384 break;
1385 }
1386 }
1387
/* When hardware VLAN stripping is disabled, grow the reported MAC header
 * length to cover one or two VLAN tags so callers copy them along with
 * the Ethernet header.
 */
1392 static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
1393 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1394 void *page, size_t *len)
1395 {
1396 u16 *tags;
1397
1398 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1399 return;
1400 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1401 tags = (u16 *)page;
1402
1403 if (tags[6] == ETH_P_8021Q &&
1404 tags[8] == ETH_P_8021Q)
1405 *len += 2 * VLAN_HLEN;
1406 else
1407 *len += VLAN_HLEN;
1408 }
1409 }
1410
1411
1412 static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
1413 struct rx_ring *rx_ring,
1414 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1415 u32 length, u16 vlan_id)
1416 {
1417 struct sk_buff *skb;
1418 struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1419 struct napi_struct *napi = &rx_ring->napi;
1420
1421
1422 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1423 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1424 put_page(lbq_desc->p.pg_chunk.page);
1425 return;
1426 }
1427 napi->dev = qdev->ndev;
1428
1429 skb = napi_get_frags(napi);
1430 if (!skb) {
1431 netif_err(qdev, drv, qdev->ndev,
1432 "Couldn't get an skb, exiting.\n");
1433 rx_ring->rx_dropped++;
1434 put_page(lbq_desc->p.pg_chunk.page);
1435 return;
1436 }
1437 prefetch(lbq_desc->p.pg_chunk.va);
1438 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1439 lbq_desc->p.pg_chunk.page,
1440 lbq_desc->p.pg_chunk.offset,
1441 length);
1442
1443 skb->len += length;
1444 skb->data_len += length;
1445 skb->truesize += length;
1446 skb_shinfo(skb)->nr_frags++;
1447
1448 rx_ring->rx_packets++;
1449 rx_ring->rx_bytes += length;
1450 skb->ip_summed = CHECKSUM_UNNECESSARY;
1451 skb_record_rx_queue(skb, rx_ring->cq_id);
1452 if (vlan_id != 0xffff)
1453 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1454 napi_gro_frags(napi);
1455 }
1456
1457
1458 static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
1459 struct rx_ring *rx_ring,
1460 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1461 u32 length, u16 vlan_id)
1462 {
1463 struct net_device *ndev = qdev->ndev;
1464 struct sk_buff *skb = NULL;
1465 void *addr;
1466 struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1467 struct napi_struct *napi = &rx_ring->napi;
1468 size_t hlen = ETH_HLEN;
1469
1470 skb = netdev_alloc_skb(ndev, length);
1471 if (!skb) {
1472 rx_ring->rx_dropped++;
1473 put_page(lbq_desc->p.pg_chunk.page);
1474 return;
1475 }
1476
1477 addr = lbq_desc->p.pg_chunk.va;
1478 prefetch(addr);
1479
1480
1481 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1482 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1483 goto err_out;
1484 }
1485
1486
1487 qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1488
/* The chip's max-frame-size filter is set above the MTU, so oversize
 * frames can still arrive and are dropped here.
 */
1492 if (skb->len > ndev->mtu + hlen) {
1493 netif_err(qdev, drv, qdev->ndev,
1494 "Segment too small, dropping.\n");
1495 rx_ring->rx_dropped++;
1496 goto err_out;
1497 }
1498 skb_put_data(skb, addr, hlen);
1499 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1500 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1501 length);
1502 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1503 lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1504 skb->len += length - hlen;
1505 skb->data_len += length - hlen;
1506 skb->truesize += length - hlen;
1507
1508 rx_ring->rx_packets++;
1509 rx_ring->rx_bytes += skb->len;
1510 skb->protocol = eth_type_trans(skb, ndev);
1511 skb_checksum_none_assert(skb);
1512
1513 if ((ndev->features & NETIF_F_RXCSUM) &&
1514 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1515
1516 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1517 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1518 "TCP checksum done!\n");
1519 skb->ip_summed = CHECKSUM_UNNECESSARY;
1520 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1521 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1522
1523 struct iphdr *iph =
1524 (struct iphdr *)((u8 *)addr + hlen);
1525 if (!(iph->frag_off &
1526 htons(IP_MF | IP_OFFSET))) {
1527 skb->ip_summed = CHECKSUM_UNNECESSARY;
1528 netif_printk(qdev, rx_status, KERN_DEBUG,
1529 qdev->ndev,
1530 "UDP checksum done!\n");
1531 }
1532 }
1533 }
1534
1535 skb_record_rx_queue(skb, rx_ring->cq_id);
1536 if (vlan_id != 0xffff)
1537 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1538 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1539 napi_gro_receive(napi, skb);
1540 else
1541 netif_receive_skb(skb);
1542 return;
1543 err_out:
1544 dev_kfree_skb_any(skb);
1545 put_page(lbq_desc->p.pg_chunk.page);
1546 }
1547
1548
1549 static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
1550 struct rx_ring *rx_ring,
1551 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1552 u32 length, u16 vlan_id)
1553 {
1554 struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1555 struct net_device *ndev = qdev->ndev;
1556 struct sk_buff *skb, *new_skb;
1557
1558 skb = sbq_desc->p.skb;
1559
1560 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1561 if (!new_skb) {
1562 rx_ring->rx_dropped++;
1563 return;
1564 }
1565 skb_reserve(new_skb, NET_IP_ALIGN);
1566
1567 dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
1568 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1569
1570 skb_put_data(new_skb, skb->data, length);
1571
1572 skb = new_skb;
1573
1574
1575 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1576 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1577 dev_kfree_skb_any(skb);
1578 return;
1579 }
1580
1581
1582 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1583 qlge_check_lb_frame(qdev, skb);
1584 dev_kfree_skb_any(skb);
1585 return;
1586 }
1587
1588
1589
1590
1591 if (skb->len > ndev->mtu + ETH_HLEN) {
1592 dev_kfree_skb_any(skb);
1593 rx_ring->rx_dropped++;
1594 return;
1595 }
1596
1597 prefetch(skb->data);
1598 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1599 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1600 "%s Multicast.\n",
1601 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1602 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1603 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1604 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1605 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1606 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1607 }
1608 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1609 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1610 "Promiscuous Packet.\n");
1611
1612 rx_ring->rx_packets++;
1613 rx_ring->rx_bytes += skb->len;
1614 skb->protocol = eth_type_trans(skb, ndev);
1615 skb_checksum_none_assert(skb);
1616
1617
1618
1619
1620 if ((ndev->features & NETIF_F_RXCSUM) &&
1621 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1622
1623 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1624 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1625 "TCP checksum done!\n");
1626 skb->ip_summed = CHECKSUM_UNNECESSARY;
1627 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1628 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1629
1630 struct iphdr *iph = (struct iphdr *)skb->data;
1631
1632 if (!(iph->frag_off &
1633 htons(IP_MF | IP_OFFSET))) {
1634 skb->ip_summed = CHECKSUM_UNNECESSARY;
1635 netif_printk(qdev, rx_status, KERN_DEBUG,
1636 qdev->ndev,
1637 "UDP checksum done!\n");
1638 }
1639 }
1640 }
1641
1642 skb_record_rx_queue(skb, rx_ring->cq_id);
1643 if (vlan_id != 0xffff)
1644 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1645 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1646 napi_gro_receive(&rx_ring->napi, skb);
1647 else
1648 netif_receive_skb(skb);
1649 }
1650
1651 static void qlge_realign_skb(struct sk_buff *skb, int len)
1652 {
1653 void *temp_addr = skb->data;
1654
/* Undo the skb_reserve(skb, QLGE_SB_PAD) done before handing the buffer
 * to hardware, leaving the data aligned on a NET_IP_ALIGN boundary.
 */
1659 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1660 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1661 memmove(skb->data, temp_addr, len);
1662 }
1663
/* Build an skb for the given inbound completion, pulling the headers and
 * data out of the small and/or large buffer queues depending on how the
 * hardware placed the frame.
 */
1669 static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
1670 struct rx_ring *rx_ring,
1671 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1672 {
1673 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1674 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1675 struct qlge_bq_desc *lbq_desc, *sbq_desc;
1676 struct sk_buff *skb = NULL;
1677 size_t hlen = ETH_HLEN;
1678
/*
 * Handle the header buffer if present.
 */
1682 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1683 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1684 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1685 "Header of %d bytes in small buffer.\n", hdr_len);
1686
1687
1688
1689 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1690 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1691 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1692 skb = sbq_desc->p.skb;
1693 qlge_realign_skb(skb, hdr_len);
1694 skb_put(skb, hdr_len);
1695 sbq_desc->p.skb = NULL;
1696 }
1697
/*
 * Handle the data buffer(s).
 */
1701 if (unlikely(!length)) {
1702 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1703 "No Data buffer in this packet.\n");
1704 return skb;
1705 }
1706
1707 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1708 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1709 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1710 "Headers in small, data of %d bytes in small, combine them.\n",
1711 length);
1712
/* The data is small enough to have been placed in a second small buffer;
 * append it to the header skb built above.
 */
1719 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1720 dma_sync_single_for_cpu(&qdev->pdev->dev,
1721 sbq_desc->dma_addr,
1722 SMALL_BUF_MAP_SIZE,
1723 DMA_FROM_DEVICE);
1724 skb_put_data(skb, sbq_desc->p.skb->data, length);
1725 } else {
1726 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1727 "%d bytes in a single small buffer.\n",
1728 length);
1729 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1730 skb = sbq_desc->p.skb;
1731 qlge_realign_skb(skb, length);
1732 skb_put(skb, length);
1733 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1734 SMALL_BUF_MAP_SIZE,
1735 DMA_FROM_DEVICE);
1736 sbq_desc->p.skb = NULL;
1737 }
1738 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1739 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1740 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1741 "Header in small, %d bytes in large. Chain large to small!\n",
1742 length);
1743
/* The data is in a single large buffer; chain it to the header buffer's
 * skb.
 */
1748 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1749 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1750 "Chaining page at offset = %d, for %d bytes to skb.\n",
1751 lbq_desc->p.pg_chunk.offset, length);
1752 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1753 lbq_desc->p.pg_chunk.offset, length);
1754 skb->len += length;
1755 skb->data_len += length;
1756 skb->truesize += length;
1757 } else {
/* The headers and data are together in a single large buffer.  Allocate a
 * new skb, chain the page to it and pull the headers into the linear
 * area.
 */
1763 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1764 skb = netdev_alloc_skb(qdev->ndev, length);
1765 if (!skb) {
1766 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1767 "No skb available, drop the packet.\n");
1768 return NULL;
1769 }
1770 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
1771 qdev->lbq_buf_size,
1772 DMA_FROM_DEVICE);
1773 skb_reserve(skb, NET_IP_ALIGN);
1774 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1776 length);
1777 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1778 lbq_desc->p.pg_chunk.offset,
1779 length);
1780 skb->len += length;
1781 skb->data_len += length;
1782 skb->truesize += length;
1783 qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
1784 lbq_desc->p.pg_chunk.va,
1785 &hlen);
1786 __pskb_pull_tail(skb, hlen);
1787 }
1788 } else {
/* The data is in a chain of large buffers pointed to by a small buffer
 * holding the scatter list.  Walk the chain and attach each page chunk to
 * the skb until all of the data has been added.
 */
1800 int size, i = 0;
1801
1802 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1803 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1804 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1805 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/* The headers were not split into a separate small buffer, so the small
 * buffer containing the scatter list doubles as the skb that is sent up
 * the stack.
 */
1815 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1816 "%d bytes of headers & data in chain of large.\n",
1817 length);
1818 skb = sbq_desc->p.skb;
1819 sbq_desc->p.skb = NULL;
1820 skb_reserve(skb, NET_IP_ALIGN);
1821 }
1822 do {
1823 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1824 size = min(length, qdev->lbq_buf_size);
1825
1826 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1827 "Adding page %d to skb for %d bytes.\n",
1828 i, size);
1829 skb_fill_page_desc(skb, i,
1830 lbq_desc->p.pg_chunk.page,
1831 lbq_desc->p.pg_chunk.offset, size);
1832 skb->len += size;
1833 skb->data_len += size;
1834 skb->truesize += size;
1835 length -= size;
1836 i++;
1837 } while (length > 0);
1838 qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1839 &hlen);
1840 __pskb_pull_tail(skb, hlen);
1841 }
1842 return skb;
1843 }
1844
1845
1846 static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
1847 struct rx_ring *rx_ring,
1848 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1849 u16 vlan_id)
1850 {
1851 struct net_device *ndev = qdev->ndev;
1852 struct sk_buff *skb = NULL;
1853
1854 skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1855 if (unlikely(!skb)) {
1856 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1857 "No skb available, drop packet.\n");
1858 rx_ring->rx_dropped++;
1859 return;
1860 }
1861
1862
1863 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1864 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1865 dev_kfree_skb_any(skb);
1866 return;
1867 }
1868
1869
1870
1871
1872 if (skb->len > ndev->mtu + ETH_HLEN) {
1873 dev_kfree_skb_any(skb);
1874 rx_ring->rx_dropped++;
1875 return;
1876 }
1877
1878
1879 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1880 qlge_check_lb_frame(qdev, skb);
1881 dev_kfree_skb_any(skb);
1882 return;
1883 }
1884
1885 prefetch(skb->data);
1886 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1887 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1888 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1889 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1890 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1891 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1892 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1893 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1894 rx_ring->rx_multicast++;
1895 }
1896 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1897 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1898 "Promiscuous Packet.\n");
1899 }
1900
1901 skb->protocol = eth_type_trans(skb, ndev);
1902 skb_checksum_none_assert(skb);
1903
1904
1905
1906
1907 if ((ndev->features & NETIF_F_RXCSUM) &&
1908 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1909
1910 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1911 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1912 "TCP checksum done!\n");
1913 skb->ip_summed = CHECKSUM_UNNECESSARY;
1914 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1915 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
1917 struct iphdr *iph = (struct iphdr *)skb->data;
1918
1919 if (!(iph->frag_off &
1920 htons(IP_MF | IP_OFFSET))) {
1921 skb->ip_summed = CHECKSUM_UNNECESSARY;
1922 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"UDP checksum done!\n");
1924 }
1925 }
1926 }
1927
1928 rx_ring->rx_packets++;
1929 rx_ring->rx_bytes += skb->len;
1930 skb_record_rx_queue(skb, rx_ring->cq_id);
1931 if (vlan_id != 0xffff)
1932 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1933 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1934 napi_gro_receive(&rx_ring->napi, skb);
1935 else
1936 netif_receive_skb(skb);
1937 }
1938
/* Process an inbound completion, dispatching on how the hardware placed the frame. */
1940 static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
1941 struct rx_ring *rx_ring,
1942 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1943 {
1944 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1945 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1946 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1947 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1948 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1949
1950 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The headers and data are split into separate buffers. */
1954 qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1955 vlan_id);
1956 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
/* The frame fit in a single small buffer.  Copy it into a new skb and
 * recycle the buffer.
 */
1961 qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1962 vlan_id);
1963 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1964 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1965 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
/* Checksummed TCP frame in a page chunk: add it to a GRO skb. */
1969 qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1970 vlan_id);
1971 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1972
1973
1974
1975 qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
1976 vlan_id);
1977 } else {
1978
1979
1980
1981 qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1982 vlan_id);
1983 }
1984
1985 return (unsigned long)length;
1986 }
1987
/* Process an outbound (transmit) completion. */
1989 static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
1990 struct qlge_ob_mac_iocb_rsp *mac_rsp)
1991 {
1992 struct tx_ring *tx_ring;
1993 struct tx_ring_desc *tx_ring_desc;
1994
1995 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1996 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1997 qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1998 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
1999 tx_ring->tx_packets++;
2000 dev_kfree_skb(tx_ring_desc->skb);
2001 tx_ring_desc->skb = NULL;
2002
2003 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2004 OB_MAC_IOCB_RSP_S |
2005 OB_MAC_IOCB_RSP_L |
2006 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2007 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2008 netif_warn(qdev, tx_done, qdev->ndev,
2009 "Total descriptor length did not match transfer length.\n");
2010 }
2011 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2012 netif_warn(qdev, tx_done, qdev->ndev,
2013 "Frame too short to be valid, not sent.\n");
2014 }
2015 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2016 netif_warn(qdev, tx_done, qdev->ndev,
2017 "Frame too long, but sent anyway.\n");
2018 }
2019 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2020 netif_warn(qdev, tx_done, qdev->ndev,
2021 "PCI backplane error. Frame not sent.\n");
2022 }
2023 }
2024 atomic_inc(&tx_ring->tx_count);
2025 }
2026
/* Queue work to reset the MPI processor after a firmware error. */
2028 void qlge_queue_fw_error(struct qlge_adapter *qdev)
2029 {
2030 qlge_link_off(qdev);
2031 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2032 }
2033
2034 void qlge_queue_asic_error(struct qlge_adapter *qdev)
2035 {
2036 qlge_link_off(qdev);
2037 qlge_disable_interrupts(qdev);
2038
/* Mark the adapter as down before scheduling the reset work. */
2042 clear_bit(QL_ADAPTER_UP, &qdev->flags);
/* Flag that this is fatal-error recovery rather than a normal close
 * before kicking the asic reset worker.
 */
2046 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2047 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2048 }
2049
2050 static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
2051 struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
2052 {
2053 switch (ib_ae_rsp->event) {
2054 case MGMT_ERR_EVENT:
2055 netif_err(qdev, rx_err, qdev->ndev,
2056 "Management Processor Fatal Error.\n");
2057 qlge_queue_fw_error(qdev);
2058 return;
2059
2060 case CAM_LOOKUP_ERR_EVENT:
2061 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2062 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2063 qlge_queue_asic_error(qdev);
2064 return;
2065
2066 case SOFT_ECC_ERROR_EVENT:
2067 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2068 qlge_queue_asic_error(qdev);
2069 break;
2070
2071 case PCI_ERR_ANON_BUF_RD:
2072 netdev_err(qdev->ndev,
2073 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2074 ib_ae_rsp->q_id);
2075 qlge_queue_asic_error(qdev);
2076 break;
2077
2078 default:
2079 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2080 ib_ae_rsp->event);
2081 qlge_queue_asic_error(qdev);
2082 break;
2083 }
2084 }
2085
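/* Service transmit completions on an outbound completion ring and wake the
 * tx queue if it was stopped and has drained enough.
 */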
2086 static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2087 {
2088 struct qlge_adapter *qdev = rx_ring->qdev;
2089 u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2090 struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
2091 int count = 0;
2092
2093 struct tx_ring *tx_ring;
2094
2095 while (prod != rx_ring->cnsmr_idx) {
2096 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2097 "cq_id = %d, prod = %d, cnsmr = %d\n",
2098 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2099
2100 net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
2101 rmb();
2102 switch (net_rsp->opcode) {
2103 case OPCODE_OB_MAC_TSO_IOCB:
2104 case OPCODE_OB_MAC_IOCB:
2105 qlge_process_mac_tx_intr(qdev, net_rsp);
2106 break;
2107 default:
2108 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2109 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2110 net_rsp->opcode);
2111 }
2112 count++;
2113 qlge_update_cq(rx_ring);
2114 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2115 }
2116 if (!net_rsp)
2117 return 0;
2118 qlge_write_cq_idx(rx_ring);
2119 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2120 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2121 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
 * The queue was stopped because the tx_ring was full.
 * Wake it now that it is at least 25% empty.
 */
2126 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2127 }
2128
2129 return count;
2130 }
2131
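/* Service inbound completions, up to the napi budget, then refill the
 * buffer queues.
 */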
2132 static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2133 {
2134 struct qlge_adapter *qdev = rx_ring->qdev;
2135 u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2136 struct qlge_net_rsp_iocb *net_rsp;
2137 int count = 0;
2138
2139
2140 while (prod != rx_ring->cnsmr_idx) {
2141 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2142 "cq_id = %d, prod = %d, cnsmr = %d\n",
2143 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2144
2145 net_rsp = rx_ring->curr_entry;
2146 rmb();
2147 switch (net_rsp->opcode) {
2148 case OPCODE_IB_MAC_IOCB:
2149 qlge_process_mac_rx_intr(qdev, rx_ring,
2150 (struct qlge_ib_mac_iocb_rsp *)
2151 net_rsp);
2152 break;
2153
2154 case OPCODE_IB_AE_IOCB:
2155 qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
2156 net_rsp);
2157 break;
2158 default:
2159 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2160 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2161 net_rsp->opcode);
2162 break;
2163 }
2164 count++;
2165 qlge_update_cq(rx_ring);
2166 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2167 if (count == budget)
2168 break;
2169 }
2170 qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2171 qlge_write_cq_idx(rx_ring);
2172 return count;
2173 }
2174
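/* napi poll routine used in MSI-X mode: service any TX completion rings
 * covered by this vector's irq_mask, then the RX ring itself.
 */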
2175 static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
2176 {
2177 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2178 struct qlge_adapter *qdev = rx_ring->qdev;
2179 struct rx_ring *trx_ring;
2180 int i, work_done = 0;
2181 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2182
2183 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2184 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2185
2186 /* Service the TX completion rings first.  They start
2187  * right after the RSS rings in the rx_ring array.
2188  */
2189 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2190 trx_ring = &qdev->rx_ring[i];
2191 /* If this vector services this TX completion ring and
2192  * the ring has work pending, then clean it.
2193  */
2194 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2195 (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2196 trx_ring->cnsmr_idx)) {
2197 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2198 "%s: Servicing TX completion ring %d.\n",
2199 __func__, trx_ring->cq_id);
2200 qlge_clean_outbound_rx_ring(trx_ring);
2201 }
2202 }
2203
2204 /*
2205  * Now service the RSS ring if it's active.
2206  */
2207 if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2208 rx_ring->cnsmr_idx) {
2209 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2210 "%s: Servicing RX completion ring %d.\n",
2211 __func__, rx_ring->cq_id);
2212 work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
2213 }
2214
2215 if (work_done < budget) {
2216 napi_complete_done(napi, work_done);
2217 qlge_enable_completion_interrupt(qdev, rx_ring->irq);
2218 }
2219 return work_done;
2220 }
2221
2222 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2223 {
2224 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2225
2226 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2227 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2228 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2229 } else {
2230 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2231 }
2232 }
2233
2234 /*
2235  * qlge_update_hw_vlan_features - restart the adapter so that a change
2236  * to the hardware VLAN acceleration features takes effect.
2237  */
2238 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2239 netdev_features_t features)
2240 {
2241 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2242 bool need_restart = netif_running(ndev);
2243 int status = 0;
2244
2245 if (need_restart) {
2246 status = qlge_adapter_down(qdev);
2247 if (status) {
2248 netif_err(qdev, link, qdev->ndev,
2249 "Failed to bring down the adapter\n");
2250 return status;
2251 }
2252 }
2253
2254
2255 ndev->features = features;
2256
2257 if (need_restart) {
2258 status = qlge_adapter_up(qdev);
2259 if (status) {
2260 netif_err(qdev, link, qdev->ndev,
2261 "Failed to bring up the adapter\n");
2262 return status;
2263 }
2264 }
2265
2266 return status;
2267 }
2268
2269 static int qlge_set_features(struct net_device *ndev,
2270 netdev_features_t features)
2271 {
2272 netdev_features_t changed = ndev->features ^ features;
2273 int err;
2274
2275 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2276
2277 err = qlge_update_hw_vlan_features(ndev, features);
2278 if (err)
2279 return err;
2280
2281 qlge_vlan_mode(ndev, features);
2282 }
2283
2284 return 0;
2285 }
2286
2287 static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
2288 {
2289 u32 enable_bit = MAC_ADDR_E;
2290 int err;
2291
2292 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2293 MAC_ADDR_TYPE_VLAN, vid);
2294 if (err)
2295 netif_err(qdev, ifup, qdev->ndev,
2296 "Failed to init vlan address.\n");
2297 return err;
2298 }
2299
2300 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2301 {
2302 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2303 int status;
2304 int err;
2305
2306 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2307 if (status)
2308 return status;
2309
2310 err = __qlge_vlan_rx_add_vid(qdev, vid);
2311 set_bit(vid, qdev->active_vlans);
2312
2313 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2314
2315 return err;
2316 }
2317
2318 static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
2319 {
2320 u32 enable_bit = 0;
2321 int err;
2322
2323 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2324 MAC_ADDR_TYPE_VLAN, vid);
2325 if (err)
2326 netif_err(qdev, ifup, qdev->ndev,
2327 "Failed to clear vlan address.\n");
2328 return err;
2329 }
2330
2331 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2332 {
2333 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2334 int status;
2335 int err;
2336
2337 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2338 if (status)
2339 return status;
2340
2341 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2342 clear_bit(vid, qdev->active_vlans);
2343
2344 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2345
2346 return err;
2347 }
2348
2349 static void qlge_restore_vlan(struct qlge_adapter *qdev)
2350 {
2351 int status;
2352 u16 vid;
2353
2354 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2355 if (status)
2356 return;
2357
2358 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2359 __qlge_vlan_rx_add_vid(qdev, vid);
2360
2361 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2362 }
2363
2364
2365 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2366 {
2367 struct rx_ring *rx_ring = dev_id;
2368
2369 napi_schedule(&rx_ring->napi);
2370 return IRQ_HANDLED;
2371 }
2372
2373 /* This handles a fatal error, MPI activity, and the default
2374  * rx_ring in an MSI-X multiple vector environment.
2375  * In MSI/Legacy environments it also processes the rest of
2376  * the rx_rings.
2377  */
2378 static irqreturn_t qlge_isr(int irq, void *dev_id)
2379 {
2380 struct rx_ring *rx_ring = dev_id;
2381 struct qlge_adapter *qdev = rx_ring->qdev;
2382 struct intr_context *intr_context = &qdev->intr_context[0];
2383 u32 var;
2384 int work_done = 0;
2385
2386 /* Experience shows that when using INTx interrupts, interrupts must
2387  * be masked manually.
2388  * When using MSI mode, INTR_EN_EN must be explicitly disabled
2389  * (even though it is auto-masked), otherwise a later command to
2390  * enable it is not effective.
2391  */
2392 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2393 qlge_disable_completion_interrupt(qdev, 0);
2394
2395 var = qlge_read32(qdev, STS);
2396
2397 /*
2398  * Check for fatal error.
2399  */
2400 if (var & STS_FE) {
2401 qlge_disable_completion_interrupt(qdev, 0);
2402 qlge_queue_asic_error(qdev);
2403 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2404 var = qlge_read32(qdev, ERR_STS);
2405 netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2406 return IRQ_HANDLED;
2407 }
2408
2409 /*
2410  * Check MPI processor activity.
2411  */
2412 if ((var & STS_PI) &&
2413 (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2414 /*
2415  * We've got an async event or mailbox completion.
2416  * Handle it and clear the source of the interrupt.
2417  */
2418 netif_err(qdev, intr, qdev->ndev,
2419 "Got MPI processor interrupt.\n");
2420 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2421 queue_delayed_work_on(smp_processor_id(),
2422 qdev->workqueue, &qdev->mpi_work, 0);
2423 work_done++;
2424 }
2425
2426 /*
2427  * Get the bit-mask that shows the active queues for this
2428  * pass.  Compare it to the queues that this irq services
2429  * and call napi if there's a match.
2430  */
2431 var = qlge_read32(qdev, ISR1);
2432 if (var & intr_context->irq_mask) {
2433 netif_info(qdev, intr, qdev->ndev,
2434 "Waking handler for rx_ring[0].\n");
2435 napi_schedule(&rx_ring->napi);
2436 work_done++;
2437 } else {
2438 /* Experience shows that the device sometimes signals an
2439  * interrupt but no work is scheduled from this function.
2440  * Nevertheless, the interrupt is auto-masked.  Therefore, we
2441  * systematically re-enable the interrupt if we didn't
2442  * schedule napi.
2443  */
2444 qlge_enable_completion_interrupt(qdev, 0);
2445 }
2446
2447 return work_done ? IRQ_HANDLED : IRQ_NONE;
2448 }
2449
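/* Set up a TSO (LSO) request in the outbound MAC IOCB when the skb is
 * GSO.  Returns 1 if TSO was set up, 0 if the frame does not need TSO,
 * or a negative errno if the header could not be made writable.
 */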
2450 static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2451 {
2452 if (skb_is_gso(skb)) {
2453 int err;
2454 __be16 l3_proto = vlan_get_protocol(skb);
2455
2456 err = skb_cow_head(skb, 0);
2457 if (err < 0)
2458 return err;
2459
2460 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2461 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2462 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2463 mac_iocb_ptr->total_hdrs_len =
2464 cpu_to_le16(skb_tcp_all_headers(skb));
2465 mac_iocb_ptr->net_trans_offset =
2466 cpu_to_le16(skb_network_offset(skb) |
2467 skb_transport_offset(skb)
2468 << OB_MAC_TRANSPORT_HDR_SHIFT);
2469 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2470 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2471 if (likely(l3_proto == htons(ETH_P_IP))) {
2472 struct iphdr *iph = ip_hdr(skb);
2473
2474 iph->check = 0;
2475 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2476 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2477 iph->daddr, 0,
2478 IPPROTO_TCP,
2479 0);
2480 } else if (l3_proto == htons(ETH_P_IPV6)) {
2481 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2482 tcp_hdr(skb)->check =
2483 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2484 &ipv6_hdr(skb)->daddr,
2485 0, IPPROTO_TCP, 0);
2486 }
2487 return 1;
2488 }
2489 return 0;
2490 }
2491
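/* Set up hardware TCP/UDP checksum offload for a non-TSO IPv4 frame:
 * seed the checksum field with the pseudo-header checksum and let the
 * hardware fill in the rest.
 */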
2492 static void qlge_hw_csum_setup(struct sk_buff *skb,
2493 struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2494 {
2495 int len;
2496 struct iphdr *iph = ip_hdr(skb);
2497 __sum16 *check;
2498
2499 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2500 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2501 mac_iocb_ptr->net_trans_offset =
2502 cpu_to_le16(skb_network_offset(skb) |
2503 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2504
2505 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2506 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2507 if (likely(iph->protocol == IPPROTO_TCP)) {
2508 check = &(tcp_hdr(skb)->check);
2509 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2510 mac_iocb_ptr->total_hdrs_len =
2511 cpu_to_le16(skb_transport_offset(skb) +
2512 (tcp_hdr(skb)->doff << 2));
2513 } else {
2514 check = &(udp_hdr(skb)->check);
2515 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2516 mac_iocb_ptr->total_hdrs_len =
2517 cpu_to_le16(skb_transport_offset(skb) +
2518 sizeof(struct udphdr));
2519 }
2520 *check = ~csum_tcpudp_magic(iph->saddr,
2521 iph->daddr, len, iph->protocol, 0);
2522 }
2523
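/* Transmit path: build an outbound MAC IOCB for the skb, map its
 * fragments for DMA, and ring the TX doorbell.
 */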
2524 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2525 {
2526 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2527 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2528 struct tx_ring_desc *tx_ring_desc;
2529 int tso;
2530 struct tx_ring *tx_ring;
2531 u32 tx_ring_idx = (u32)skb->queue_mapping;
2532
2533 tx_ring = &qdev->tx_ring[tx_ring_idx];
2534
2535 if (skb_padto(skb, ETH_ZLEN))
2536 return NETDEV_TX_OK;
2537
2538 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2539 netif_info(qdev, tx_queued, qdev->ndev,
2540 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2541 __func__, tx_ring_idx);
2542 netif_stop_subqueue(ndev, tx_ring->wq_id);
2543 tx_ring->tx_errors++;
2544 return NETDEV_TX_BUSY;
2545 }
2546 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2547 mac_iocb_ptr = tx_ring_desc->queue_entry;
2548 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2549
2550 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2551 mac_iocb_ptr->tid = tx_ring_desc->index;
2552 /* Record the tx queue index in the IOCB so the completion
2553  * handler can find the tx_ring this request came from.
2554  */
2555 mac_iocb_ptr->txq_idx = tx_ring_idx;
2556 tx_ring_desc->skb = skb;
2557
2558 mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2559
2560 if (skb_vlan_tag_present(skb)) {
2561 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2562 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2563 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2564 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2565 }
2566 tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2567 if (tso < 0) {
2568 dev_kfree_skb_any(skb);
2569 return NETDEV_TX_OK;
2570 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2571 qlge_hw_csum_setup(skb,
2572 (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2573 }
2574 if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2575 NETDEV_TX_OK) {
2576 netif_err(qdev, tx_queued, qdev->ndev,
2577 "Could not map the segments.\n");
2578 tx_ring->tx_errors++;
2579 return NETDEV_TX_BUSY;
2580 }
2581
2582 tx_ring->prod_idx++;
2583 if (tx_ring->prod_idx == tx_ring->wq_len)
2584 tx_ring->prod_idx = 0;
2585 wmb();
2586
2587 qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2588 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2589 "tx queued, slot %d, len %d\n",
2590 tx_ring->prod_idx, skb->len);
2591
2592 atomic_dec(&tx_ring->tx_count);
2593
2594 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2595 netif_stop_subqueue(ndev, tx_ring->wq_id);
2596 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2597 /*
2598  * The queue got stopped because the tx_ring was full.
2599  * Wake it up, because it's now at least 25% empty.
2600  */
2601 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2602 }
2603 return NETDEV_TX_OK;
2604 }
2605
2606 static void qlge_free_shadow_space(struct qlge_adapter *qdev)
2607 {
2608 if (qdev->rx_ring_shadow_reg_area) {
2609 dma_free_coherent(&qdev->pdev->dev,
2610 PAGE_SIZE,
2611 qdev->rx_ring_shadow_reg_area,
2612 qdev->rx_ring_shadow_reg_dma);
2613 qdev->rx_ring_shadow_reg_area = NULL;
2614 }
2615 if (qdev->tx_ring_shadow_reg_area) {
2616 dma_free_coherent(&qdev->pdev->dev,
2617 PAGE_SIZE,
2618 qdev->tx_ring_shadow_reg_area,
2619 qdev->tx_ring_shadow_reg_dma);
2620 qdev->tx_ring_shadow_reg_area = NULL;
2621 }
2622 }
2623
2624 static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
2625 {
2626 qdev->rx_ring_shadow_reg_area =
2627 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2628 &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2629 if (!qdev->rx_ring_shadow_reg_area) {
2630 netif_err(qdev, ifup, qdev->ndev,
2631 "Allocation of RX shadow space failed.\n");
2632 return -ENOMEM;
2633 }
2634
2635 qdev->tx_ring_shadow_reg_area =
2636 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2637 &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2638 if (!qdev->tx_ring_shadow_reg_area) {
2639 netif_err(qdev, ifup, qdev->ndev,
2640 "Allocation of TX shadow space failed.\n");
2641 goto err_wqp_sh_area;
2642 }
2643 return 0;
2644
2645 err_wqp_sh_area:
2646 dma_free_coherent(&qdev->pdev->dev,
2647 PAGE_SIZE,
2648 qdev->rx_ring_shadow_reg_area,
2649 qdev->rx_ring_shadow_reg_dma);
2650 return -ENOMEM;
2651 }
2652
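/* Link every tx descriptor to its IOCB slot in the work queue and reset
 * the free-entry count to the full ring length.
 */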
2653 static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
2654 {
2655 struct tx_ring_desc *tx_ring_desc;
2656 int i;
2657 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2658
2659 mac_iocb_ptr = tx_ring->wq_base;
2660 tx_ring_desc = tx_ring->q;
2661 for (i = 0; i < tx_ring->wq_len; i++) {
2662 tx_ring_desc->index = i;
2663 tx_ring_desc->skb = NULL;
2664 tx_ring_desc->queue_entry = mac_iocb_ptr;
2665 mac_iocb_ptr++;
2666 tx_ring_desc++;
2667 }
2668 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2669 }
2670
2671 static void qlge_free_tx_resources(struct qlge_adapter *qdev,
2672 struct tx_ring *tx_ring)
2673 {
2674 if (tx_ring->wq_base) {
2675 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2676 tx_ring->wq_base, tx_ring->wq_base_dma);
2677 tx_ring->wq_base = NULL;
2678 }
2679 kfree(tx_ring->q);
2680 tx_ring->q = NULL;
2681 }
2682
2683 static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
2684 struct tx_ring *tx_ring)
2685 {
2686 tx_ring->wq_base =
2687 dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2688 &tx_ring->wq_base_dma, GFP_ATOMIC);
2689
2690 if (!tx_ring->wq_base ||
2691 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2692 goto pci_alloc_err;
2693
2694 tx_ring->q =
2695 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2696 GFP_KERNEL);
2697 if (!tx_ring->q)
2698 goto err;
2699
2700 return 0;
2701 err:
2702 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2703 tx_ring->wq_base, tx_ring->wq_base_dma);
2704 tx_ring->wq_base = NULL;
2705 pci_alloc_err:
2706 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2707 return -ENOMEM;
2708 }
2709
2710 static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2711 {
2712 struct qlge_bq *lbq = &rx_ring->lbq;
2713 unsigned int last_offset;
2714
2715 last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
2716 while (lbq->next_to_clean != lbq->next_to_use) {
2717 struct qlge_bq_desc *lbq_desc =
2718 &lbq->queue[lbq->next_to_clean];
2719
2720 if (lbq_desc->p.pg_chunk.offset == last_offset)
2721 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2722 qlge_lbq_block_size(qdev),
2723 DMA_FROM_DEVICE);
2724 put_page(lbq_desc->p.pg_chunk.page);
2725
2726 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2727 }
2728
2729 if (rx_ring->master_chunk.page) {
2730 dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2731 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
2732 put_page(rx_ring->master_chunk.page);
2733 rx_ring->master_chunk.page = NULL;
2734 }
2735 }
2736
2737 static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2738 {
2739 int i;
2740
2741 for (i = 0; i < QLGE_BQ_LEN; i++) {
2742 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2743
2744 if (!sbq_desc) {
2745 netif_err(qdev, ifup, qdev->ndev,
2746 "sbq_desc %d is NULL.\n", i);
2747 return;
2748 }
2749 if (sbq_desc->p.skb) {
2750 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2751 SMALL_BUF_MAP_SIZE,
2752 DMA_FROM_DEVICE);
2753 dev_kfree_skb(sbq_desc->p.skb);
2754 sbq_desc->p.skb = NULL;
2755 }
2756 }
2757 }
2758
2759 /* Free all large and small rx buffers associated
2760  * with the completion queues for this device.
2761  */
2762 static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
2763 {
2764 int i;
2765
2766 for (i = 0; i < qdev->rx_ring_count; i++) {
2767 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2768
2769 if (rx_ring->lbq.queue)
2770 qlge_free_lbq_buffers(qdev, rx_ring);
2771 if (rx_ring->sbq.queue)
2772 qlge_free_sbq_buffers(qdev, rx_ring);
2773 }
2774 }
2775
2776 static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
2777 {
2778 int i;
2779
2780 for (i = 0; i < qdev->rss_ring_count; i++)
2781 qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2782 HZ / 2);
2783 }
2784
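/* Allocate the DMA ring of buffer pointers and the driver-side
 * descriptor array for a small or large buffer queue.  On failure the
 * caller cleans up via qlge_free_rx_resources().
 */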
2785 static int qlge_init_bq(struct qlge_bq *bq)
2786 {
2787 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2788 struct qlge_adapter *qdev = rx_ring->qdev;
2789 struct qlge_bq_desc *bq_desc;
2790 __le64 *buf_ptr;
2791 int i;
2792
2793 bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2794 &bq->base_dma, GFP_ATOMIC);
2795 if (!bq->base)
2796 return -ENOMEM;
2797
2798 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2799 GFP_KERNEL);
2800 if (!bq->queue)
2801 return -ENOMEM;
2802
2803 buf_ptr = bq->base;
2804 bq_desc = &bq->queue[0];
2805 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2806 bq_desc->p.skb = NULL;
2807 bq_desc->index = i;
2808 bq_desc->buf_ptr = buf_ptr;
2809 }
2810
2811 return 0;
2812 }
2813
2814 static void qlge_free_rx_resources(struct qlge_adapter *qdev,
2815 struct rx_ring *rx_ring)
2816 {
2817
2818 if (rx_ring->sbq.base) {
2819 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2820 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2821 rx_ring->sbq.base = NULL;
2822 }
2823
2824
2825 kfree(rx_ring->sbq.queue);
2826 rx_ring->sbq.queue = NULL;
2827
2828
2829 if (rx_ring->lbq.base) {
2830 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2831 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2832 rx_ring->lbq.base = NULL;
2833 }
2834
2835
2836 kfree(rx_ring->lbq.queue);
2837 rx_ring->lbq.queue = NULL;
2838
2839
2840 if (rx_ring->cq_base) {
2841 dma_free_coherent(&qdev->pdev->dev,
2842 rx_ring->cq_size,
2843 rx_ring->cq_base, rx_ring->cq_base_dma);
2844 rx_ring->cq_base = NULL;
2845 }
2846 }
2847
2848 /* Allocate queues and buffers for this completion queue based
2849  * on the values in the parameter structure.
2850  */
2851 static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
2852 struct rx_ring *rx_ring)
2853 {
2854
2855
2856
2857 rx_ring->cq_base =
2858 dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2859 &rx_ring->cq_base_dma, GFP_ATOMIC);
2860
2861 if (!rx_ring->cq_base) {
2862 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2863 return -ENOMEM;
2864 }
2865
2866 if (rx_ring->cq_id < qdev->rss_ring_count &&
2867 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2868 qlge_free_rx_resources(qdev, rx_ring);
2869 return -ENOMEM;
2870 }
2871
2872 return 0;
2873 }
2874
2875 static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
2876 {
2877 struct tx_ring *tx_ring;
2878 struct tx_ring_desc *tx_ring_desc;
2879 int i, j;
2880
2881 /*
2882  * Loop through all queues and free
2883  * any resources.
2884  */
2885 for (j = 0; j < qdev->tx_ring_count; j++) {
2886 tx_ring = &qdev->tx_ring[j];
2887 for (i = 0; i < tx_ring->wq_len; i++) {
2888 tx_ring_desc = &tx_ring->q[i];
2889 if (tx_ring_desc && tx_ring_desc->skb) {
2890 netif_err(qdev, ifdown, qdev->ndev,
2891 "Freeing lost SKB %p, from queue %d, index %d.\n",
2892 tx_ring_desc->skb, j,
2893 tx_ring_desc->index);
2894 qlge_unmap_send(qdev, tx_ring_desc,
2895 tx_ring_desc->map_cnt);
2896 dev_kfree_skb(tx_ring_desc->skb);
2897 tx_ring_desc->skb = NULL;
2898 }
2899 }
2900 }
2901 }
2902
2903 static void qlge_free_mem_resources(struct qlge_adapter *qdev)
2904 {
2905 int i;
2906
2907 for (i = 0; i < qdev->tx_ring_count; i++)
2908 qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
2909 for (i = 0; i < qdev->rx_ring_count; i++)
2910 qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
2911 qlge_free_shadow_space(qdev);
2912 }
2913
2914 static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
2915 {
2916 int i;
2917
2918
2919 if (qlge_alloc_shadow_space(qdev))
2920 return -ENOMEM;
2921
2922 for (i = 0; i < qdev->rx_ring_count; i++) {
2923 if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2924 netif_err(qdev, ifup, qdev->ndev,
2925 "RX resource allocation failed.\n");
2926 goto err_mem;
2927 }
2928 }
2929
2930 for (i = 0; i < qdev->tx_ring_count; i++) {
2931 if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2932 netif_err(qdev, ifup, qdev->ndev,
2933 "TX resource allocation failed.\n");
2934 goto err_mem;
2935 }
2936 }
2937 return 0;
2938
2939 err_mem:
2940 qlge_free_mem_resources(qdev);
2941 return -ENOMEM;
2942 }
2943
2944 /* Build and download the completion queue initialization control
2945  * block (CQICB) for this rx_ring, including its large and small
2946  * buffer queues when the ring is an inbound (RSS) ring.
2947  */
2948 static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2949 {
2950 struct cqicb *cqicb = &rx_ring->cqicb;
2951 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2952 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2953 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2954 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2955 void __iomem *doorbell_area =
2956 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2957 int err = 0;
2958 u64 dma;
2959 __le64 *base_indirect_ptr;
2960 int page_entries;
2961
2962 /* Set up the shadow registers for this ring. */
2963 rx_ring->prod_idx_sh_reg = shadow_reg;
2964 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2965 *rx_ring->prod_idx_sh_reg = 0;
2966 shadow_reg += sizeof(u64);
2967 shadow_reg_dma += sizeof(u64);
2968 rx_ring->lbq.base_indirect = shadow_reg;
2969 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2970 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2971 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2972 rx_ring->sbq.base_indirect = shadow_reg;
2973 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
2974
2975
2976 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2977 rx_ring->cnsmr_idx = 0;
2978 rx_ring->curr_entry = rx_ring->cq_base;
2979
2980
2981 rx_ring->valid_db_reg = doorbell_area + 0x04;
2982
2983
2984 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
2985
2986
2987 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
2988
2989 memset((void *)cqicb, 0, sizeof(struct cqicb));
2990 cqicb->msix_vect = rx_ring->irq;
2991
2992 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
2993 LEN_CPP_CONT);
2994
2995 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2996
2997 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2998
2999
3000
3001
3002 cqicb->flags = FLAGS_LC |
3003 FLAGS_LV |
3004 FLAGS_LI;
3005 if (rx_ring->cq_id < qdev->rss_ring_count) {
3006 cqicb->flags |= FLAGS_LL;
3007 dma = (u64)rx_ring->lbq.base_dma;
3008 base_indirect_ptr = rx_ring->lbq.base_indirect;
3009
3010 for (page_entries = 0;
3011 page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
3012 page_entries++) {
3013 base_indirect_ptr[page_entries] = cpu_to_le64(dma);
3014 dma += DB_PAGE_SIZE;
3015 }
3016 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3017 cqicb->lbq_buf_size =
3018 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3019 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3020 rx_ring->lbq.next_to_use = 0;
3021 rx_ring->lbq.next_to_clean = 0;
3022
3023 cqicb->flags |= FLAGS_LS;
3024 dma = (u64)rx_ring->sbq.base_dma;
3025 base_indirect_ptr = rx_ring->sbq.base_indirect;
3026
3027 for (page_entries = 0;
3028 page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
3029 page_entries++) {
3030 base_indirect_ptr[page_entries] = cpu_to_le64(dma);
3031 dma += DB_PAGE_SIZE;
3032 }
3033 cqicb->sbq_addr =
3034 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3035 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3036 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3037 rx_ring->sbq.next_to_use = 0;
3038 rx_ring->sbq.next_to_clean = 0;
3039 }
3040 if (rx_ring->cq_id < qdev->rss_ring_count) {
3041 /* Inbound completion handling rx_rings run in
3042  * separate NAPI contexts.
3043  */
3044 netif_napi_add_weight(qdev->ndev, &rx_ring->napi,
3045 qlge_napi_poll_msix, 64);
3046 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3047 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3048 } else {
3049 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3050 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3051 }
3052 err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3053 CFG_LCQ, rx_ring->cq_id);
3054 if (err) {
3055 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3056 return err;
3057 }
3058 return err;
3059 }
3060
3061 static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
3062 {
3063 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3064 void __iomem *doorbell_area =
3065 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3066 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3067 (tx_ring->wq_id * sizeof(u64));
3068 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3069 (tx_ring->wq_id * sizeof(u64));
3070 int err = 0;
3071
3072
3073
3074
3075
3076 tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3077 tx_ring->prod_idx = 0;
3078
3079 tx_ring->valid_db_reg = doorbell_area + 0x04;
3080
3081
3082
3083
3084 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3085 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3086
3087 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3088 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3089 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3090 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3091 wqicb->rid = 0;
3092 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3093
3094 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3095
3096 qlge_init_tx_ring(qdev, tx_ring);
3097
3098 err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3099 (u16)tx_ring->wq_id);
3100 if (err) {
3101 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3102 return err;
3103 }
3104 return err;
3105 }
3106
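/* Undo qlge_enable_msix(): release MSI-X or MSI vectors and clear the
 * corresponding adapter flags.
 */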
3107 static void qlge_disable_msix(struct qlge_adapter *qdev)
3108 {
3109 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3110 pci_disable_msix(qdev->pdev);
3111 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3112 kfree(qdev->msi_x_entry);
3113 qdev->msi_x_entry = NULL;
3114 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3115 pci_disable_msi(qdev->pdev);
3116 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3117 }
3118 }
3119
3120 /* We start by trying to get the number of vectors
3121  * stored in qdev->intr_count.  If we don't get that
3122  * many then we reduce the count and try again.
3123  */
3124 static void qlge_enable_msix(struct qlge_adapter *qdev)
3125 {
3126 int i, err;
3127
3128
3129 if (qlge_irq_type == MSIX_IRQ) {
3130
3131
3132
3133 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3134 sizeof(struct msix_entry),
3135 GFP_KERNEL);
3136 if (!qdev->msi_x_entry) {
3137 qlge_irq_type = MSI_IRQ;
3138 goto msi;
3139 }
3140
3141 for (i = 0; i < qdev->intr_count; i++)
3142 qdev->msi_x_entry[i].entry = i;
3143
3144 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3145 1, qdev->intr_count);
3146 if (err < 0) {
3147 kfree(qdev->msi_x_entry);
3148 qdev->msi_x_entry = NULL;
3149 netif_warn(qdev, ifup, qdev->ndev,
3150 "MSI-X Enable failed, trying MSI.\n");
3151 qlge_irq_type = MSI_IRQ;
3152 } else {
3153 qdev->intr_count = err;
3154 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3155 netif_info(qdev, ifup, qdev->ndev,
3156 "MSI-X Enabled, got %d vectors.\n",
3157 qdev->intr_count);
3158 return;
3159 }
3160 }
3161 msi:
3162 qdev->intr_count = 1;
3163 if (qlge_irq_type == MSI_IRQ) {
3164 if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3165 set_bit(QL_MSI_ENABLED, &qdev->flags);
3166 netif_info(qdev, ifup, qdev->ndev,
3167 "Running with MSI interrupts.\n");
3168 return;
3169 }
3170 }
3171 qlge_irq_type = LEG_IRQ;
3172 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3173 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3174 "Running with legacy interrupts.\n");
3175 }
3176
3177 /* Each vector services 1 RSS ring and 1 or more
3178  * TX completion rings.  This function loops through
3179  * the TX completion rings and assigns the vector that
3180  * will service it.  An example would be if there are
3181  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3182  * This would mean that vector 0 would service RSS ring 0
3183  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3184  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3185  */
3186 static void qlge_set_tx_vect(struct qlge_adapter *qdev)
3187 {
3188 int i, j, vect;
3189 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3190
3191 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3192
3193 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3194 i < qdev->rx_ring_count; i++) {
3195 if (j == tx_rings_per_vector) {
3196 vect++;
3197 j = 0;
3198 }
3199 qdev->rx_ring[i].irq = vect;
3200 j++;
3201 }
3202 } else {
3203
3204
3205
3206 for (i = 0; i < qdev->rx_ring_count; i++)
3207 qdev->rx_ring[i].irq = 0;
3208 }
3209 }
3210
3211 /* Set the interrupt mask for this vector.  Each vector
3212  * will service 1 RSS ring and 1 or more
3213  * TX completion rings.  This function sets up a bit mask
3214  * per vector to indicate which rings it services.
3215  */
3216 static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
3217 {
3218 int j, vect = ctx->intr;
3219 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3220
3221 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3222
3223
3224
3225 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3226
3227
3228
3229 for (j = 0; j < tx_rings_per_vector; j++) {
3230 ctx->irq_mask |=
3231 (1 << qdev->rx_ring[qdev->rss_ring_count +
3232 (vect * tx_rings_per_vector) + j].cq_id);
3233 }
3234 } else {
3235
3236
3237
3238 for (j = 0; j < qdev->rx_ring_count; j++)
3239 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3240 }
3241 }
3242
3243 /*
3244  * Here we build the intr_context structures based on
3245  * our rx_ring count and intr vector count.
3246  * The intr_context structure is used to hook each vector
3247  * up to either a dedicated MSI-X handler or the shared handler.
3248  */
3249 static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
3250 {
3251 int i = 0;
3252 struct intr_context *intr_context = &qdev->intr_context[0];
3253
3254 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3255 /* Each rx_ring has its own intr_context since
3256  * we have separate vectors for each queue.
3257  * This is only true when MSI-X is enabled.
3258  */
3259 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3260 qdev->rx_ring[i].irq = i;
3261 intr_context->intr = i;
3262 intr_context->qdev = qdev;
3263
3264
3265
3266 qlge_set_irq_mask(qdev, intr_context);
3267 /*
3268  * We set up each vector's enable/disable/read bits so
3269  * there's no bit/mask calculations in the critical path.
3270  */
3271 intr_context->intr_en_mask =
3272 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3273 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3274 | i;
3275 intr_context->intr_dis_mask =
3276 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3277 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3278 INTR_EN_IHD | i;
3279 intr_context->intr_read_mask =
3280 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3281 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3282 i;
3283 if (i == 0) {
3284 /* The first vector/queue handles
3285  * broadcast/multicast, fatal errors,
3286  * and firmware events.  This is in addition
3287  * to normal inbound NAPI processing.
3288  */
3289 intr_context->handler = qlge_isr;
3290 sprintf(intr_context->name, "%s-rx-%d",
3291 qdev->ndev->name, i);
3292 } else {
3293
3294
3295
3296 intr_context->handler = qlge_msix_rx_isr;
3297 sprintf(intr_context->name, "%s-rx-%d",
3298 qdev->ndev->name, i);
3299 }
3300 }
3301 } else {
3302 /*
3303  * All rx_rings use the same intr_context since
3304  * there is only one vector.
3305  */
3306 intr_context->intr = 0;
3307 intr_context->qdev = qdev;
3308
3309
3310
3311
3312 intr_context->intr_en_mask =
3313 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3314 intr_context->intr_dis_mask =
3315 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3316 INTR_EN_TYPE_DISABLE;
3317 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3318 /* Experience shows that when using INTx interrupts,
3319  * the device does not always auto-mask INTR_EN_EN.
3320  * Moreover, masking INTR_EN_EN manually does not
3321  * immediately prevent interrupt generation.
3322  */
3323 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3324 INTR_EN_EI;
3325 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3326 }
3327 intr_context->intr_read_mask =
3328 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3329
3330
3331
3332 intr_context->handler = qlge_isr;
3333 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3334
3335
3336
3337
3338
3339 qlge_set_irq_mask(qdev, intr_context);
3340 }
3341
3342
3343
3344 qlge_set_tx_vect(qdev);
3345 }
3346
3347 static void qlge_free_irq(struct qlge_adapter *qdev)
3348 {
3349 int i;
3350 struct intr_context *intr_context = &qdev->intr_context[0];
3351
3352 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3353 if (intr_context->hooked) {
3354 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3355 free_irq(qdev->msi_x_entry[i].vector,
3356 &qdev->rx_ring[i]);
3357 } else {
3358 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3359 }
3360 }
3361 }
3362 qlge_disable_msix(qdev);
3363 }
3364
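/* Hook an interrupt handler for each vector.  With MSI-X each rx_ring
 * gets its own vector; otherwise a single (possibly shared) IRQ
 * services rx_ring[0].
 */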
3365 static int qlge_request_irq(struct qlge_adapter *qdev)
3366 {
3367 int i;
3368 int status = 0;
3369 struct pci_dev *pdev = qdev->pdev;
3370 struct intr_context *intr_context = &qdev->intr_context[0];
3371
3372 qlge_resolve_queues_to_irqs(qdev);
3373
3374 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3375 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3376 status = request_irq(qdev->msi_x_entry[i].vector,
3377 intr_context->handler,
3378 0,
3379 intr_context->name,
3380 &qdev->rx_ring[i]);
3381 if (status) {
3382 netif_err(qdev, ifup, qdev->ndev,
3383 "Failed request for MSIX interrupt %d.\n",
3384 i);
3385 goto err_irq;
3386 }
3387 } else {
3388 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3389 "trying msi or legacy interrupts.\n");
3390 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3391 "%s: irq = %d.\n", __func__, pdev->irq);
3392 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3393 "%s: context->name = %s.\n", __func__,
3394 intr_context->name);
3395 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3396 "%s: dev_id = 0x%p.\n", __func__,
3397 &qdev->rx_ring[0]);
3398 status =
3399 request_irq(pdev->irq, qlge_isr,
3400 test_bit(QL_MSI_ENABLED, &qdev->flags)
3401 ? 0
3402 : IRQF_SHARED,
3403 intr_context->name, &qdev->rx_ring[0]);
3404 if (status)
3405 goto err_irq;
3406
3407 netif_err(qdev, ifup, qdev->ndev,
3408 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3409 intr_context->name);
3410 }
3411 intr_context->hooked = 1;
3412 }
3413 return status;
3414 err_irq:
3415 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3416 qlge_free_irq(qdev);
3417 return status;
3418 }
3419
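/* Download the RSS initialization control block (RICB): hash keys and
 * a 1024-entry indirection table that spreads flows across the RSS
 * rings.
 */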
3420 static int qlge_start_rss(struct qlge_adapter *qdev)
3421 {
3422 static const u8 init_hash_seed[] = {
3423 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3424 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3425 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3426 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3427 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3428 };
3429 struct ricb *ricb = &qdev->ricb;
3430 int status = 0;
3431 int i;
3432 u8 *hash_id = (u8 *)ricb->hash_cq_id;
3433
3434 memset((void *)ricb, 0, sizeof(*ricb));
3435
3436 ricb->base_cq = RSS_L4K;
3437 ricb->flags =
3438 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3439 ricb->mask = cpu_to_le16((u16)(0x3ff));
3440
3441 /*
3442  * Fill out the Indirection Table.
3443  */
3444 for (i = 0; i < 1024; i++)
3445 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3446
3447 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3448 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3449
3450 status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3451 if (status) {
3452 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3453 return status;
3454 }
3455 return status;
3456 }
3457
3458 static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
3459 {
3460 int i, status = 0;
3461
3462 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3463 if (status)
3464 return status;
3465
3466 for (i = 0; i < 16; i++) {
3467 status = qlge_set_routing_reg(qdev, i, 0, 0);
3468 if (status) {
3469 netif_err(qdev, ifup, qdev->ndev,
3470 "Failed to init routing register for CAM packets.\n");
3471 break;
3472 }
3473 }
3474 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3475 return status;
3476 }
3477
3478
3479 static int qlge_route_initialize(struct qlge_adapter *qdev)
3480 {
3481 int status = 0;
3482
3483
3484 status = qlge_clear_routing_entries(qdev);
3485 if (status)
3486 return status;
3487
3488 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3489 if (status)
3490 return status;
3491
3492 status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3493 RT_IDX_IP_CSUM_ERR, 1);
3494 if (status) {
3495 netif_err(qdev, ifup, qdev->ndev,
3496 "Failed to init routing register for IP CSUM error packets.\n");
3497 goto exit;
3498 }
3499 status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3500 RT_IDX_TU_CSUM_ERR, 1);
3501 if (status) {
3502 netif_err(qdev, ifup, qdev->ndev,
3503 "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3504 goto exit;
3505 }
3506 status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3507 if (status) {
3508 netif_err(qdev, ifup, qdev->ndev,
3509 "Failed to init routing register for broadcast packets.\n");
3510 goto exit;
3511 }
3512 /* If we have more than one inbound queue, then turn on RSS in the
3513  * routing block.
3514  */
3515 if (qdev->rss_ring_count > 1) {
3516 status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3517 RT_IDX_RSS_MATCH, 1);
3518 if (status) {
3519 netif_err(qdev, ifup, qdev->ndev,
3520 "Failed to init routing register for MATCH RSS packets.\n");
3521 goto exit;
3522 }
3523 }
3524
3525 status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3526 RT_IDX_CAM_HIT, 1);
3527 if (status)
3528 netif_err(qdev, ifup, qdev->ndev,
3529 "Failed to init routing register for CAM packets.\n");
3530 exit:
3531 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3532 return status;
3533 }
3534
3535 int qlge_cam_route_initialize(struct qlge_adapter *qdev)
3536 {
3537 int status, set;
3538
3539 /*
3540  * Check if the link is up, and use that to decide whether
3541  * we are setting or clearing the MAC address in the CAM.
3542  */
3543 set = qlge_read32(qdev, STS);
3544 set &= qdev->port_link_up;
3545 status = qlge_set_mac_addr(qdev, set);
3546 if (status) {
3547 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3548 return status;
3549 }
3550
3551 status = qlge_route_initialize(qdev);
3552 if (status)
3553 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3554
3555 return status;
3556 }
3557
3558 static int qlge_adapter_initialize(struct qlge_adapter *qdev)
3559 {
3560 u32 value, mask;
3561 int i;
3562 int status = 0;
3563
3564 /*
3565  * Set up the System register to halt on errors.
3566  */
3567 value = SYS_EFE | SYS_FAE;
3568 mask = value << 16;
3569 qlge_write32(qdev, SYS, mask | value);
3570
3571
3572 value = NIC_RCV_CFG_DFQ;
3573 mask = NIC_RCV_CFG_DFQ_MASK;
3574 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3575 value |= NIC_RCV_CFG_RV;
3576 mask |= (NIC_RCV_CFG_RV << 16);
3577 }
3578 qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
3579
3580
3581 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3582
3583
3584 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3585 FSC_EC | FSC_VM_PAGE_4K;
3586 value |= SPLT_SETTING;
3587
3588
3589 mask = FSC_VM_PAGESIZE_MASK |
3590 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3591 qlge_write32(qdev, FSC, mask | value);
3592
3593 qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
3594
3595 /* Set RX packet routing to use the port/pci function on which
3596  * the packet arrived, in addition to the usual frame routing.
3597  * This is helpful with bonding where both interfaces can have
3598  * the same MAC address.
3599  */
3600 qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3601
3602 /* Reroute all packets to our Interface.  They may have
3603  * been routed to MPI firmware due to WOL.
3604  */
3605 value = qlge_read32(qdev, MGMT_RCV_CFG);
3606 value &= ~MGMT_RCV_CFG_RM;
3607 mask = 0xffff0000;
3608
3609
3610 qlge_write32(qdev, MGMT_RCV_CFG, mask);
3611 qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
3612
3613
3614 if (qdev->pdev->subsystem_device == 0x0068 ||
3615 qdev->pdev->subsystem_device == 0x0180)
3616 qdev->wol = WAKE_MAGIC;
3617
3618 /* Start up the rx queues. */
3619 for (i = 0; i < qdev->rx_ring_count; i++) {
3620 status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
3621 if (status) {
3622 netif_err(qdev, ifup, qdev->ndev,
3623 "Failed to start rx ring[%d].\n", i);
3624 return status;
3625 }
3626 }
3627
3628 /* If there is more than one inbound completion queue
3629  * then download a RICB to configure RSS.
3630  */
3631 if (qdev->rss_ring_count > 1) {
3632 status = qlge_start_rss(qdev);
3633 if (status) {
3634 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3635 return status;
3636 }
3637 }
3638
3639 /* Start up the tx queues. */
3640 for (i = 0; i < qdev->tx_ring_count; i++) {
3641 status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
3642 if (status) {
3643 netif_err(qdev, ifup, qdev->ndev,
3644 "Failed to start tx ring[%d].\n", i);
3645 return status;
3646 }
3647 }
3648
3649 /* Initialize the port and set the max framesize. */
3650 status = qdev->nic_ops->port_initialize(qdev);
3651 if (status)
3652 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3653
3654 /* Set up the MAC address and frame routing filter. */
3655 status = qlge_cam_route_initialize(qdev);
3656 if (status) {
3657 netif_err(qdev, ifup, qdev->ndev,
3658 "Failed to init CAM/Routing tables.\n");
3659 return status;
3660 }
3661
3662 /* Start NAPI for the RSS queues. */
3663 for (i = 0; i < qdev->rss_ring_count; i++)
3664 napi_enable(&qdev->rx_ring[i].napi);
3665
3666 return status;
3667 }
3668
3669
3670 static int qlge_adapter_reset(struct qlge_adapter *qdev)
3671 {
3672 u32 value;
3673 int status = 0;
3674 unsigned long end_jiffies;
3675
3676
3677 status = qlge_clear_routing_entries(qdev);
3678 if (status) {
3679 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3680 return status;
3681 }
3682
3683 /* If the recovery bit is set, skip the mailbox commands and
3684  * just clear the bit; otherwise this is a normal reset.
3685  */
3686 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3687
3688 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3689
3690
3691 qlge_wait_fifo_empty(qdev);
3692 } else {
3693 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3694 }
3695
3696 qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3697
3698 end_jiffies = jiffies + usecs_to_jiffies(30);
3699 do {
3700 value = qlge_read32(qdev, RST_FO);
3701 if ((value & RST_FO_FR) == 0)
3702 break;
3703 cpu_relax();
3704 } while (time_before(jiffies, end_jiffies));
3705
3706 if (value & RST_FO_FR) {
3707 netif_err(qdev, ifdown, qdev->ndev,
3708 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3709 status = -ETIMEDOUT;
3710 }
3711
3712
3713 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3714 return status;
3715 }
3716
3717 static void qlge_display_dev_info(struct net_device *ndev)
3718 {
3719 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3720
3721 netif_info(qdev, probe, qdev->ndev,
3722 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3723 qdev->func,
3724 qdev->port,
3725 qdev->chip_rev_id & 0x0000000f,
3726 qdev->chip_rev_id >> 4 & 0x0000000f,
3727 qdev->chip_rev_id >> 8 & 0x0000000f,
3728 qdev->chip_rev_id >> 12 & 0x0000000f);
3729 netif_info(qdev, probe, qdev->ndev,
3730 "MAC address %pM\n", ndev->dev_addr);
3731 }
3732
3733 static int qlge_wol(struct qlge_adapter *qdev)
3734 {
3735 int status = 0;
3736 u32 wol = MB_WOL_DISABLE;
3737
3738 /* The CAM is still intact after a reset, but if we
3739  * are doing WOL, then we may need to program the
3740  * routing regs.  We would also need to issue the mailbox
3741  * commands to instruct the MPI what to do per the ethtool
3742  * settings.
3743  */
3744
3745 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3746 WAKE_MCAST | WAKE_BCAST)) {
3747 netif_err(qdev, ifdown, qdev->ndev,
3748 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3749 qdev->wol);
3750 return -EINVAL;
3751 }
3752
3753 if (qdev->wol & WAKE_MAGIC) {
3754 status = qlge_mb_wol_set_magic(qdev, 1);
3755 if (status) {
3756 netif_err(qdev, ifdown, qdev->ndev,
3757 "Failed to set magic packet on %s.\n",
3758 qdev->ndev->name);
3759 return status;
3760 }
3761 netif_info(qdev, drv, qdev->ndev,
3762 "Enabled magic packet successfully on %s.\n",
3763 qdev->ndev->name);
3764
3765 wol |= MB_WOL_MAGIC_PKT;
3766 }
3767
3768 if (qdev->wol) {
3769 wol |= MB_WOL_MODE_ON;
3770 status = qlge_mb_wol_mode(qdev, wol);
3771 netif_err(qdev, drv, qdev->ndev,
3772 "WOL %s (wol code 0x%x) on %s\n",
3773 (status == 0) ? "Successfully set" : "Failed",
3774 wol, qdev->ndev->name);
3775 }
3776
3777 return status;
3778 }
3779
3780 static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
3781 {
3782 /* Don't kill the reset worker thread if we
3783  * are in the process of recovery.
3784  */
3785 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3786 cancel_delayed_work_sync(&qdev->asic_reset_work);
3787 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3788 cancel_delayed_work_sync(&qdev->mpi_work);
3789 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3790 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3791 }
3792
3793 static int qlge_adapter_down(struct qlge_adapter *qdev)
3794 {
3795 int i, status = 0;
3796
3797 qlge_link_off(qdev);
3798
3799 qlge_cancel_all_work_sync(qdev);
3800
3801 for (i = 0; i < qdev->rss_ring_count; i++)
3802 napi_disable(&qdev->rx_ring[i].napi);
3803
3804 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3805
3806 qlge_disable_interrupts(qdev);
3807
3808 qlge_tx_ring_clean(qdev);
3809
3810
3811 for (i = 0; i < qdev->rss_ring_count; i++)
3812 netif_napi_del(&qdev->rx_ring[i].napi);
3813
3814 status = qlge_adapter_reset(qdev);
3815 if (status)
3816 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3817 qdev->func);
3818 qlge_free_rx_buffers(qdev);
3819
3820 return status;
3821 }
3822
3823 static int qlge_adapter_up(struct qlge_adapter *qdev)
3824 {
3825 int err = 0;
3826
3827 err = qlge_adapter_initialize(qdev);
3828 if (err) {
3829 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3830 goto err_init;
3831 }
3832 set_bit(QL_ADAPTER_UP, &qdev->flags);
3833 qlge_alloc_rx_buffers(qdev);
3834
3835
3836
3837 if ((qlge_read32(qdev, STS) & qdev->port_init) &&
3838 (qlge_read32(qdev, STS) & qdev->port_link_up))
3839 qlge_link_on(qdev);
3840
3841 clear_bit(QL_ALLMULTI, &qdev->flags);
3842 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3843 qlge_set_multicast_list(qdev->ndev);
3844
3845
3846 qlge_restore_vlan(qdev);
3847
3848 qlge_enable_interrupts(qdev);
3849 qlge_enable_all_completion_interrupts(qdev);
3850 netif_tx_start_all_queues(qdev->ndev);
3851
3852 return 0;
3853 err_init:
3854 qlge_adapter_reset(qdev);
3855 return err;
3856 }
3857
3858 static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
3859 {
3860 qlge_free_mem_resources(qdev);
3861 qlge_free_irq(qdev);
3862 }
3863
3864 static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
3865 {
3866 if (qlge_alloc_mem_resources(qdev)) {
3867 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3868 return -ENOMEM;
3869 }
3870 return qlge_request_irq(qdev);
3871 }
3872
3873 static int qlge_close(struct net_device *ndev)
3874 {
3875 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3876 int i;
3877
3878 /* If we hit the pci_channel_io_perm_failure
3879  * condition, then we already
3880  * brought the adapter down.
3881  */
3882 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3883 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3884 clear_bit(QL_EEH_FATAL, &qdev->flags);
3885 return 0;
3886 }
3887
3888 /*
3889  * Wait for the device to recover from a reset.
3890  * (Rarely happens, but possible.)
3891  */
3892 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3893 msleep(1);
3894
3895 /* Make sure refill_work doesn't re-enable napi! */
3896 for (i = 0; i < qdev->rss_ring_count; i++)
3897 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3898
3899 qlge_adapter_down(qdev);
3900 qlge_release_adapter_resources(qdev);
3901 return 0;
3902 }
3903
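/* Pick the large buffer size based on the current MTU. */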
3904 static void qlge_set_lb_size(struct qlge_adapter *qdev)
3905 {
3906 if (qdev->ndev->mtu <= 1500)
3907 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3908 else
3909 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3910 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3911 }
3912
3913 static int qlge_configure_rings(struct qlge_adapter *qdev)
3914 {
3915 int i;
3916 struct rx_ring *rx_ring;
3917 struct tx_ring *tx_ring;
3918 int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
3919
3920 /* In a perfect world we have one RSS ring for each CPU
3921  * and each has its own vector.  To do that we ask for
3922  * cpu_cnt vectors.  qlge_enable_msix() will adjust the
3923  * vector count to what we actually get.  We then
3924  * allocate an RSS ring for each.
3925  * Essentially, we are doing min(cpu_count, msix_vector_count).
3926  */
3927 qdev->intr_count = cpu_cnt;
3928 qlge_enable_msix(qdev);
3929
3930 qdev->rss_ring_count = qdev->intr_count;
3931 qdev->tx_ring_count = cpu_cnt;
3932 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3933
3934 for (i = 0; i < qdev->tx_ring_count; i++) {
3935 tx_ring = &qdev->tx_ring[i];
3936 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3937 tx_ring->qdev = qdev;
3938 tx_ring->wq_id = i;
3939 tx_ring->wq_len = qdev->tx_ring_size;
3940 tx_ring->wq_size =
3941 tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
3942
3943 /*
3944  * The completion queue IDs for the tx rings start
3945  * immediately after the rss rings.
3946  */
3947 tx_ring->cq_id = qdev->rss_ring_count + i;
3948 }
3949
3950 for (i = 0; i < qdev->rx_ring_count; i++) {
3951 rx_ring = &qdev->rx_ring[i];
3952 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3953 rx_ring->qdev = qdev;
3954 rx_ring->cq_id = i;
3955 rx_ring->cpu = i % cpu_cnt;
3956 if (i < qdev->rss_ring_count) {
3957
3958
3959
3960 rx_ring->cq_len = qdev->rx_ring_size;
3961 rx_ring->cq_size =
3962 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3963 rx_ring->lbq.type = QLGE_LB;
3964 rx_ring->sbq.type = QLGE_SB;
3965 INIT_DELAYED_WORK(&rx_ring->refill_work,
3966 &qlge_slow_refill);
3967 } else {
3968
3969
3970
3971
3972 rx_ring->cq_len = qdev->tx_ring_size;
3973 rx_ring->cq_size =
3974 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3975 }
3976 }
3977 return 0;
3978 }
3979
3980 static int qlge_open(struct net_device *ndev)
3981 {
3982 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3983 int err = 0;
3984
3985 err = qlge_adapter_reset(qdev);
3986 if (err)
3987 return err;
3988
3989 qlge_set_lb_size(qdev);
3990 err = qlge_configure_rings(qdev);
3991 if (err)
3992 return err;
3993
3994 err = qlge_get_adapter_resources(qdev);
3995 if (err)
3996 goto error_up;
3997
3998 err = qlge_adapter_up(qdev);
3999 if (err)
4000 goto error_up;
4001
4002 return err;
4003
4004 error_up:
4005 qlge_release_adapter_resources(qdev);
4006 return err;
4007 }
4008
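/* Re-size the rx buffers by bringing the adapter down and back up.
 * Used when an MTU change requires a different large buffer size.
 */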
4009 static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
4010 {
4011 int status;
4012
4013 /* Wait for an outstanding reset to complete. */
4014 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4015 int i = 4;
4016
4017 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4018 netif_err(qdev, ifup, qdev->ndev,
4019 "Waiting for adapter UP...\n");
4020 ssleep(1);
4021 }
4022
4023 if (!i) {
4024 netif_err(qdev, ifup, qdev->ndev,
4025 "Timed out waiting for adapter UP\n");
4026 return -ETIMEDOUT;
4027 }
4028 }
4029
4030 status = qlge_adapter_down(qdev);
4031 if (status)
4032 goto error;
4033
4034 qlge_set_lb_size(qdev);
4035
4036 status = qlge_adapter_up(qdev);
4037 if (status)
4038 goto error;
4039
4040 return status;
4041 error:
4042 netif_alert(qdev, ifup, qdev->ndev,
4043 "Driver up/down cycle failed, closing device.\n");
4044 set_bit(QL_ADAPTER_UP, &qdev->flags);
4045 dev_close(qdev->ndev);
4046 return status;
4047 }
4048
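/* Only the 1500 <-> 9000 MTU transitions are supported; anything else
 * is rejected with -EINVAL.  A running interface is cycled so the new
 * large buffer size takes effect.
 */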
4049 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4050 {
4051 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4052 int status;
4053
4054 if (ndev->mtu == 1500 && new_mtu == 9000)
4055 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4056 else if (ndev->mtu == 9000 && new_mtu == 1500)
4057 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4058 else
4059 return -EINVAL;
4060
4061 queue_delayed_work(qdev->workqueue,
4062 &qdev->mpi_port_cfg_work, 3 * HZ);
4063
4064 ndev->mtu = new_mtu;
4065
4066 if (!netif_running(qdev->ndev))
4067 return 0;
4068
4069 status = qlge_change_rx_buffers(qdev);
4070 if (status) {
4071 netif_err(qdev, ifup, qdev->ndev,
4072 "Changing MTU failed.\n");
4073 }
4074
4075 return status;
4076 }
4077
4078 static struct net_device_stats *qlge_get_stats(struct net_device
4079 *ndev)
4080 {
4081 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4082 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4083 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4084 unsigned long pkts, mcast, dropped, errors, bytes;
4085 int i;
4086
4087
4088 pkts = mcast = dropped = errors = bytes = 0;
4089 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4090 pkts += rx_ring->rx_packets;
4091 bytes += rx_ring->rx_bytes;
4092 dropped += rx_ring->rx_dropped;
4093 errors += rx_ring->rx_errors;
4094 mcast += rx_ring->rx_multicast;
4095 }
4096 ndev->stats.rx_packets = pkts;
4097 ndev->stats.rx_bytes = bytes;
4098 ndev->stats.rx_dropped = dropped;
4099 ndev->stats.rx_errors = errors;
4100 ndev->stats.multicast = mcast;
4101
4102
4103 pkts = errors = bytes = 0;
4104 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4105 pkts += tx_ring->tx_packets;
4106 bytes += tx_ring->tx_bytes;
4107 errors += tx_ring->tx_errors;
4108 }
4109 ndev->stats.tx_packets = pkts;
4110 ndev->stats.tx_bytes = bytes;
4111 ndev->stats.tx_errors = errors;
4112 return &ndev->stats;
4113 }
4114
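/* Update the chip's frame routing and multicast filters to match the
 * netdev flags and multicast list.
 */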
4115 static void qlge_set_multicast_list(struct net_device *ndev)
4116 {
4117 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4118 struct netdev_hw_addr *ha;
4119 int i, status;
4120
4121 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4122 if (status)
4123 return;
4124
4125 /* Set or clear promiscuous mode if a
4126  * transition is taking place.
4127  */
4128 if (ndev->flags & IFF_PROMISC) {
4129 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4130 if (qlge_set_routing_reg
4131 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4132 netif_err(qdev, hw, qdev->ndev,
4133 "Failed to set promiscuous mode.\n");
4134 } else {
4135 set_bit(QL_PROMISCUOUS, &qdev->flags);
4136 }
4137 }
4138 } else {
4139 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4140 if (qlge_set_routing_reg
4141 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4142 netif_err(qdev, hw, qdev->ndev,
4143 "Failed to clear promiscuous mode.\n");
4144 } else {
4145 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4146 }
4147 }
4148 }
4149
4150 /*
4151  * Set or clear all-multicast mode if a
4152  * transition is taking place.
4153  */
4154 if ((ndev->flags & IFF_ALLMULTI) ||
4155 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4156 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4157 if (qlge_set_routing_reg
4158 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4159 netif_err(qdev, hw, qdev->ndev,
4160 "Failed to set all-multi mode.\n");
4161 } else {
4162 set_bit(QL_ALLMULTI, &qdev->flags);
4163 }
4164 }
4165 } else {
4166 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4167 if (qlge_set_routing_reg
4168 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4169 netif_err(qdev, hw, qdev->ndev,
4170 "Failed to clear all-multi mode.\n");
4171 } else {
4172 clear_bit(QL_ALLMULTI, &qdev->flags);
4173 }
4174 }
4175 }
4176
4177 if (!netdev_mc_empty(ndev)) {
4178 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4179 if (status)
4180 goto exit;
4181 i = 0;
4182 netdev_for_each_mc_addr(ha, ndev) {
4183 if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4184 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4185 netif_err(qdev, hw, qdev->ndev,
4186 "Failed to loadmulticast address.\n");
4187 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4188 goto exit;
4189 }
4190 i++;
4191 }
4192 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4193 if (qlge_set_routing_reg
4194 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4195 netif_err(qdev, hw, qdev->ndev,
4196 "Failed to set multicast match mode.\n");
4197 } else {
4198 set_bit(QL_ALLMULTI, &qdev->flags);
4199 }
4200 }
4201 exit:
4202 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
4203 }
4204
4205 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4206 {
4207 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4208 struct sockaddr *addr = p;
4209 int status;
4210
4211 if (!is_valid_ether_addr(addr->sa_data))
4212 return -EADDRNOTAVAIL;
4213 eth_hw_addr_set(ndev, addr->sa_data);
4214
4215 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4216
4217 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4218 if (status)
4219 return status;
4220 status = qlge_set_mac_addr_reg(qdev, (const u8 *)ndev->dev_addr,
4221 MAC_ADDR_TYPE_CAM_MAC,
4222 qdev->func * MAX_CQ);
4223 if (status)
4224 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4225 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4226 return status;
4227 }
4228
4229 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4230 {
4231 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4232
4233 qlge_queue_asic_error(qdev);
4234 }
4235
4236 static void qlge_asic_reset_work(struct work_struct *work)
4237 {
4238 struct qlge_adapter *qdev =
4239 container_of(work, struct qlge_adapter, asic_reset_work.work);
4240 int status;
4241
4242 rtnl_lock();
4243 status = qlge_adapter_down(qdev);
4244 if (status)
4245 goto error;
4246
4247 status = qlge_adapter_up(qdev);
4248 if (status)
4249 goto error;
4250
4251
4252 clear_bit(QL_ALLMULTI, &qdev->flags);
4253 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4254 qlge_set_multicast_list(qdev->ndev);
4255
4256 rtnl_unlock();
4257 return;
4258 error:
4259 netif_alert(qdev, ifup, qdev->ndev,
4260 "Driver up/down cycle failed, closing device\n");
4261
4262 set_bit(QL_ADAPTER_UP, &qdev->flags);
4263 dev_close(qdev->ndev);
4264 rtnl_unlock();
4265 }
4266
4267 static const struct nic_operations qla8012_nic_ops = {
4268 .get_flash = qlge_get_8012_flash_params,
4269 .port_initialize = qlge_8012_port_initialize,
4270 };
4271
4272 static const struct nic_operations qla8000_nic_ops = {
4273 .get_flash = qlge_get_8000_flash_params,
4274 .port_initialize = qlge_8000_port_initialize,
4275 };
4276
4277 /* Find the pcie function number of the other NIC
4278  * function on this chip.  Both NIC functions share
4279  * common firmware, so knowing the alternate function
4280  * lets the driver coordinate work that affects both,
4281  * such as firmware resets and coredumps.
4282  */
4283
4284 static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
4285 {
4286 int status = 0;
4287 u32 temp;
4288 u32 nic_func1, nic_func2;
4289
4290 status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4291 &temp);
4292 if (status)
4293 return status;
4294
4295 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4296 MPI_TEST_NIC_FUNC_MASK);
4297 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4298 MPI_TEST_NIC_FUNC_MASK);
4299
4300 if (qdev->func == nic_func1)
4301 qdev->alt_func = nic_func2;
4302 else if (qdev->func == nic_func2)
4303 qdev->alt_func = nic_func1;
4304 else
4305 status = -EIO;
4306
4307 return status;
4308 }
4309
4310 static int qlge_get_board_info(struct qlge_adapter *qdev)
4311 {
4312 int status;
4313
4314 qdev->func =
4315 (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4316 if (qdev->func > 3)
4317 return -EIO;
4318
4319 status = qlge_get_alt_pcie_func(qdev);
4320 if (status)
4321 return status;
4322
4323 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4324 if (qdev->port) {
4325 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4326 qdev->port_link_up = STS_PL1;
4327 qdev->port_init = STS_PI1;
4328 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4329 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4330 } else {
4331 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4332 qdev->port_link_up = STS_PL0;
4333 qdev->port_init = STS_PI0;
4334 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4335 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4336 }
4337 qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
4338 qdev->device_id = qdev->pdev->device;
4339 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4340 qdev->nic_ops = &qla8012_nic_ops;
4341 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4342 qdev->nic_ops = &qla8000_nic_ops;
4343 return status;
4344 }
4345
4346 static void qlge_release_all(struct pci_dev *pdev)
4347 {
4348 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4349
4350 if (qdev->workqueue) {
4351 destroy_workqueue(qdev->workqueue);
4352 qdev->workqueue = NULL;
4353 }
4354
4355 if (qdev->reg_base)
4356 iounmap(qdev->reg_base);
4357 if (qdev->doorbell_area)
4358 iounmap(qdev->doorbell_area);
4359 vfree(qdev->mpi_coredump);
4360 pci_release_regions(pdev);
4361 }
4362
4363 static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
4364 int cards_found)
4365 {
4366 struct net_device *ndev = qdev->ndev;
4367 int err = 0;
4368
4369 err = pci_enable_device(pdev);
4370 if (err) {
4371 dev_err(&pdev->dev, "PCI device enable failed.\n");
4372 return err;
4373 }
4374
4375 qdev->pdev = pdev;
4376 pci_set_drvdata(pdev, qdev);
4378
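/* Use a 4096-byte PCIe maximum read request size. */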
4379 err = pcie_set_readrq(pdev, 4096);
4380 if (err) {
4381 dev_err(&pdev->dev, "Set readrq failed.\n");
4382 goto err_disable_pci;
4383 }
4384
4385 err = pci_request_regions(pdev, DRV_NAME);
4386 if (err) {
4387 dev_err(&pdev->dev, "PCI region request failed.\n");
4388 goto err_disable_pci;
4389 }
4390
4391 pci_set_master(pdev);
4392 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4393 set_bit(QL_DMA64, &qdev->flags);
4394 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4395 } else {
4396 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4397 if (!err)
4398 err = dma_set_coherent_mask(&pdev->dev,
4399 DMA_BIT_MASK(32));
4400 }
4401
4402 if (err) {
4403 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4404 goto err_release_pci;
4405 }
4407
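/* Tell the PCI error recovery code that this device needs a fundamental reset. */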
4408 pdev->needs_freset = 1;
4409 pci_save_state(pdev);
4410 qdev->reg_base =
4411 ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
4412 if (!qdev->reg_base) {
4413 dev_err(&pdev->dev, "Register mapping failed.\n");
4414 err = -ENOMEM;
4415 goto err_release_pci;
4416 }
4417
4418 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4419 qdev->doorbell_area =
4420 ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
4421 if (!qdev->doorbell_area) {
4422 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4423 err = -ENOMEM;
4424 goto err_iounmap_base;
4425 }
4426
4427 err = qlge_get_board_info(qdev);
4428 if (err) {
4429 dev_err(&pdev->dev, "Register access failed.\n");
4430 err = -EIO;
4431 goto err_iounmap_doorbell;
4432 }
4433 qdev->msg_enable = netif_msg_init(debug, default_msg);
4434 spin_lock_init(&qdev->stats_lock);
4435
4436 if (qlge_mpi_coredump) {
4437 qdev->mpi_coredump =
4438 vmalloc(sizeof(struct qlge_mpi_coredump));
4439 if (!qdev->mpi_coredump) {
4440 err = -ENOMEM;
4441 goto err_iounmap_doorbell;
4442 }
4443 if (qlge_force_coredump)
4444 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4445 }
4446
4447 err = qdev->nic_ops->get_flash(qdev);
4448 if (err) {
4449 dev_err(&pdev->dev, "Invalid FLASH.\n");
4450 goto err_free_mpi_coredump;
4451 }
4453
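/* Keep a local copy of the current MAC address. */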
4454 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4456
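/* Default TX/RX ring sizes. */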
4457 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4458 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4460
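/* Default interrupt coalescing parameters. */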
4461 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4462 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4463 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4464 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4468
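/* Ordered workqueue used by the deferred reset and MPI work items below. */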
4469 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4470 ndev->name);
4471 if (!qdev->workqueue) {
4472 err = -ENOMEM;
4473 goto err_free_mpi_coredump;
4474 }
4475
4476 INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
4477 INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
4478 INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
4479 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
4480 INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
4481 init_completion(&qdev->ide_completion);
4482 mutex_init(&qdev->mpi_mutex);
4483
4484 if (!cards_found) {
4485 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4486 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4487 DRV_NAME, DRV_VERSION);
4488 }
4489 return 0;
4490
4491 err_free_mpi_coredump:
4492 vfree(qdev->mpi_coredump);
4493 err_iounmap_doorbell:
4494 iounmap(qdev->doorbell_area);
4495 err_iounmap_base:
4496 iounmap(qdev->reg_base);
4497 err_release_pci:
4498 pci_release_regions(pdev);
4499 err_disable_pci:
4500 pci_disable_device(pdev);
4501
4502 return err;
4503 }
4504
4505 static const struct net_device_ops qlge_netdev_ops = {
4506 .ndo_open = qlge_open,
4507 .ndo_stop = qlge_close,
4508 .ndo_start_xmit = qlge_send,
4509 .ndo_change_mtu = qlge_change_mtu,
4510 .ndo_get_stats = qlge_get_stats,
4511 .ndo_set_rx_mode = qlge_set_multicast_list,
4512 .ndo_set_mac_address = qlge_set_mac_address,
4513 .ndo_validate_addr = eth_validate_addr,
4514 .ndo_tx_timeout = qlge_tx_timeout,
4515 .ndo_set_features = qlge_set_features,
4516 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4517 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4518 };
4519
4520 static void qlge_timer(struct timer_list *t)
4521 {
4522 struct qlge_adapter *qdev = from_timer(qdev, t, timer);
4523 u32 var = 0;
4524
4525 var = qlge_read32(qdev, STS);
4526 if (pci_channel_offline(qdev->pdev)) {
4527 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4528 return;
4529 }
4530
4531 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4532 }
4533
4534 static const struct devlink_ops qlge_devlink_ops;
4535
4536 static int qlge_probe(struct pci_dev *pdev,
4537 const struct pci_device_id *pci_entry)
4538 {
4539 struct qlge_netdev_priv *ndev_priv;
4540 struct qlge_adapter *qdev = NULL;
4541 struct net_device *ndev = NULL;
4542 struct devlink *devlink;
4543 static int cards_found;
4544 int err;
4545
4546 devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter),
4547 &pdev->dev);
4548 if (!devlink)
4549 return -ENOMEM;
4550
4551 qdev = devlink_priv(devlink);
4552
4553 ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
4554 min(MAX_CPUS,
4555 netif_get_num_default_rss_queues()));
4556 if (!ndev) {
4557 err = -ENOMEM;
4558 goto devlink_free;
4559 }
4560
4561 ndev_priv = netdev_priv(ndev);
4562 ndev_priv->qdev = qdev;
4563 ndev_priv->ndev = ndev;
4564 qdev->ndev = ndev;
4565 err = qlge_init_device(pdev, qdev, cards_found);
4566 if (err < 0)
4567 goto netdev_free;
4568
4569 SET_NETDEV_DEV(ndev, &pdev->dev);
4570 ndev->hw_features = NETIF_F_SG |
4571 NETIF_F_IP_CSUM |
4572 NETIF_F_TSO |
4573 NETIF_F_TSO_ECN |
4574 NETIF_F_HW_VLAN_CTAG_TX |
4575 NETIF_F_HW_VLAN_CTAG_RX |
4576 NETIF_F_HW_VLAN_CTAG_FILTER |
4577 NETIF_F_RXCSUM;
4578 ndev->features = ndev->hw_features;
4579 ndev->vlan_features = ndev->hw_features;
4580
4581 ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4582 NETIF_F_HW_VLAN_CTAG_TX |
4583 NETIF_F_HW_VLAN_CTAG_RX);
4584
4585 if (test_bit(QL_DMA64, &qdev->flags))
4586 ndev->features |= NETIF_F_HIGHDMA;
4590
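/* Set up the rest of the net_device structure. */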
4591 ndev->tx_queue_len = qdev->tx_ring_size;
4592 ndev->irq = pdev->irq;
4593
4594 ndev->netdev_ops = &qlge_netdev_ops;
4595 ndev->ethtool_ops = &qlge_ethtool_ops;
4596 ndev->watchdog_timeo = 10 * HZ;
4601
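/* The chip supports only 1500 or 9000 byte MTUs; these bounds reject
 * out-of-range values and qlge_change_mtu() enforces the exact sizes.
 */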
4602 ndev->min_mtu = ETH_DATA_LEN;
4603 ndev->max_mtu = 9000;
4604
4605 err = register_netdev(ndev);
4606 if (err) {
4607 dev_err(&pdev->dev, "net device registration failed.\n");
4608 goto cleanup_pdev;
4609 }
4610
4611 err = qlge_health_create_reporters(qdev);
4612 if (err)
4613 goto unregister_netdev;
4617
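/* Periodic timer used to detect a dead PCI bus (EEH); qlge_timer() checks
 * for an offline PCI channel every five seconds.
 */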
4618 timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
4619 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4620 qlge_link_off(qdev);
4621 qlge_display_dev_info(ndev);
4622 atomic_set(&qdev->lb_count, 0);
4623 cards_found++;
4624 devlink_register(devlink);
4625 return 0;
4626
4627 unregister_netdev:
4628 unregister_netdev(ndev);
4629 cleanup_pdev:
4630 qlge_release_all(pdev);
4631 pci_disable_device(pdev);
4632 netdev_free:
4633 free_netdev(ndev);
4634 devlink_free:
4635 devlink_free(devlink);
4636
4637 return err;
4638 }
4639
4640 netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
4641 {
4642 return qlge_send(skb, ndev);
4643 }
4644
4645 int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4646 {
4647 return qlge_clean_inbound_rx_ring(rx_ring, budget);
4648 }
4649
4650 static void qlge_remove(struct pci_dev *pdev)
4651 {
4652 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4653 struct net_device *ndev = qdev->ndev;
4654 struct devlink *devlink = priv_to_devlink(qdev);
4655
4656 devlink_unregister(devlink);
4657 del_timer_sync(&qdev->timer);
4658 qlge_cancel_all_work_sync(qdev);
4659 unregister_netdev(ndev);
4660 qlge_release_all(pdev);
4661 pci_disable_device(pdev);
4662 devlink_health_reporter_destroy(qdev->reporter);
4663 devlink_free(devlink);
4664 free_netdev(ndev);
4665 }
4667
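/* Tear down driver resources without touching the hardware (EEH path). */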
4668 static void qlge_eeh_close(struct net_device *ndev)
4669 {
4670 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4671 int i;
4672
4673 if (netif_carrier_ok(ndev)) {
4674 netif_carrier_off(ndev);
4675 netif_stop_queue(ndev);
4676 }
4678
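/* Stop all outstanding deferred work before freeing resources. */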
4679 qlge_cancel_all_work_sync(qdev);
4680
4681 for (i = 0; i < qdev->rss_ring_count; i++)
4682 netif_napi_del(&qdev->rx_ring[i].napi);
4683
4684 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4685 qlge_tx_ring_clean(qdev);
4686 qlge_free_rx_buffers(qdev);
4687 qlge_release_adapter_resources(qdev);
4688 }
4693
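/* Called by the PCI subsystem when a PCI bus error involving this device
 * is detected.
 */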
4694 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4695 pci_channel_state_t state)
4696 {
4697 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4698 struct net_device *ndev = qdev->ndev;
4699
4700 switch (state) {
4701 case pci_channel_io_normal:
4702 return PCI_ERS_RESULT_CAN_RECOVER;
4703 case pci_channel_io_frozen:
4704 netif_device_detach(ndev);
4705 del_timer_sync(&qdev->timer);
4706 if (netif_running(ndev))
4707 qlge_eeh_close(ndev);
4708 pci_disable_device(pdev);
4709 return PCI_ERS_RESULT_NEED_RESET;
4710 case pci_channel_io_perm_failure:
4711 dev_err(&pdev->dev,
4712 "%s: pci_channel_io_perm_failure.\n", __func__);
4713 del_timer_sync(&qdev->timer);
4714 qlge_eeh_close(ndev);
4715 set_bit(QL_EEH_FATAL, &qdev->flags);
4716 return PCI_ERS_RESULT_DISCONNECT;
4717 }
4719
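/* Request a slot reset. */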
4720 return PCI_ERS_RESULT_NEED_RESET;
4721 }
4728
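/* Called after the PCI bus has been reset: re-enable the device, restore
 * PCI state and reset the adapter so normal operation can resume.
 */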
4729 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4730 {
4731 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4732
4733 pdev->error_state = pci_channel_io_normal;
4734
4735 pci_restore_state(pdev);
4736 if (pci_enable_device(pdev)) {
4737 netif_err(qdev, ifup, qdev->ndev,
4738 "Cannot re-enable PCI device after reset.\n");
4739 return PCI_ERS_RESULT_DISCONNECT;
4740 }
4741 pci_set_master(pdev);
4742
4743 if (qlge_adapter_reset(qdev)) {
4744 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4745 set_bit(QL_EEH_FATAL, &qdev->flags);
4746 return PCI_ERS_RESULT_DISCONNECT;
4747 }
4748
4749 return PCI_ERS_RESULT_RECOVERED;
4750 }
4751
4752 static void qlge_io_resume(struct pci_dev *pdev)
4753 {
4754 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4755 struct net_device *ndev = qdev->ndev;
4756 int err = 0;
4757
4758 if (netif_running(ndev)) {
4759 err = qlge_open(ndev);
4760 if (err) {
4761 netif_err(qdev, ifup, qdev->ndev,
4762 "Device initialization failed after reset.\n");
4763 return;
4764 }
4765 } else {
4766 netif_err(qdev, ifup, qdev->ndev,
4767 "Device was not running prior to EEH.\n");
4768 }
4769 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4770 netif_device_attach(ndev);
4771 }
4772
4773 static const struct pci_error_handlers qlge_err_handler = {
4774 .error_detected = qlge_io_error_detected,
4775 .slot_reset = qlge_io_slot_reset,
4776 .resume = qlge_io_resume,
4777 };
4778
4779 static int __maybe_unused qlge_suspend(struct device *dev_d)
4780 {
4781 struct pci_dev *pdev = to_pci_dev(dev_d);
4782 struct qlge_adapter *qdev;
4783 struct net_device *ndev;
4784 int err;
4785
4786 qdev = pci_get_drvdata(pdev);
4787 ndev = qdev->ndev;
4788 netif_device_detach(ndev);
4789 del_timer_sync(&qdev->timer);
4790
4791 if (netif_running(ndev)) {
4792 err = qlge_adapter_down(qdev);
4793 if (err)
4794 return err;
4795 }
4796
4797 qlge_wol(qdev);
4798
4799 return 0;
4800 }
4801
4802 static int __maybe_unused qlge_resume(struct device *dev_d)
4803 {
4804 struct pci_dev *pdev = to_pci_dev(dev_d);
4805 struct qlge_adapter *qdev;
4806 struct net_device *ndev;
4807 int err;
4808
4809 qdev = pci_get_drvdata(pdev);
4810 ndev = qdev->ndev;
4811
4812 pci_set_master(pdev);
4813
4814 device_wakeup_disable(dev_d);
4815
4816 if (netif_running(ndev)) {
4817 err = qlge_adapter_up(qdev);
4818 if (err)
4819 return err;
4820 }
4821
4822 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4823 netif_device_attach(ndev);
4824
4825 return 0;
4826 }
4827
4828 static void qlge_shutdown(struct pci_dev *pdev)
4829 {
4830 qlge_suspend(&pdev->dev);
4831 }
4832
4833 static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
4834
4835 static struct pci_driver qlge_driver = {
4836 .name = DRV_NAME,
4837 .id_table = qlge_pci_tbl,
4838 .probe = qlge_probe,
4839 .remove = qlge_remove,
4840 .driver.pm = &qlge_pm_ops,
4841 .shutdown = qlge_shutdown,
4842 .err_handler = &qlge_err_handler
4843 };
4844
4845 module_pci_driver(qlge_driver);