Back to home page

OSCL-LXR

 
 

    


0001 /************************************************************************
0002  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
0003  * Copyright(c) 2002-2010 Exar Corp.
0004  *
0005  * This software may be used and distributed according to the terms of
0006  * the GNU General Public License (GPL), incorporated herein by reference.
0007  * Drivers based on or derived from this code fall under the GPL and must
0008  * retain the authorship, copyright and license notice.  This file is not
0009  * a complete program and may only be used when the entire operating
0010  * system is licensed under the GPL.
0011  * See the file COPYING in this distribution for more information.
0012  *
0013  * Credits:
0014  * Jeff Garzik      : For pointing out the improper error condition
0015  *            check in the s2io_xmit routine and also some
0016  *            issues in the Tx watch dog function. Also for
0017  *            patiently answering all those innumerable
0018  *            questions regarding the 2.6 porting issues.
0019  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
0020  *            macros available only in 2.6 Kernel.
0021  * Francois Romieu  : For pointing out all code part that were
0022  *            deprecated and also styling related comments.
0023  * Grant Grundler   : For helping me get rid of some Architecture
0024  *            dependent code.
0025  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
0026  *
0027  * The module loadable parameters that are supported by the driver and a brief
0028  * explanation of all the variables.
0029  *
0030  * rx_ring_num : This can be used to program the number of receive rings used
0031  * in the driver.
0032  * rx_ring_sz: This defines the number of receive blocks each ring can have.
0033  *     This is also an array of size 8.
0034  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
0035  *      values are 1, 2.
0036  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
0037  * tx_fifo_len: This too is an array of 8. Each element defines the number of
0038  * Tx descriptors that can be associated with each corresponding FIFO.
0039  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
0040  *     2(MSI_X). Default value is '2(MSI_X)'
0041  * lro_max_pkts: This parameter defines maximum number of packets can be
0042  *     aggregated as a single large packet
0043  * napi: This parameter used to enable/disable NAPI (polling Rx)
0044  *     Possible values '1' for enable and '0' for disable. Default is '1'
0045  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
0046  *                 Possible values '1' for enable , '0' for disable.
0047  *                 Default is '2' - which means disable in promisc mode
0048  *                 and enable in non-promiscuous mode.
0049  * multiq: This parameter used to enable/disable MULTIQUEUE support.
0050  *      Possible values '1' for enable and '0' for disable. Default is '0'
0051  ************************************************************************/
0052 
0053 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0054 
0055 #include <linux/module.h>
0056 #include <linux/types.h>
0057 #include <linux/errno.h>
0058 #include <linux/ioport.h>
0059 #include <linux/pci.h>
0060 #include <linux/dma-mapping.h>
0061 #include <linux/kernel.h>
0062 #include <linux/netdevice.h>
0063 #include <linux/etherdevice.h>
0064 #include <linux/mdio.h>
0065 #include <linux/skbuff.h>
0066 #include <linux/init.h>
0067 #include <linux/delay.h>
0068 #include <linux/stddef.h>
0069 #include <linux/ioctl.h>
0070 #include <linux/timex.h>
0071 #include <linux/ethtool.h>
0072 #include <linux/workqueue.h>
0073 #include <linux/if_vlan.h>
0074 #include <linux/ip.h>
0075 #include <linux/tcp.h>
0076 #include <linux/uaccess.h>
0077 #include <linux/io.h>
0078 #include <linux/io-64-nonatomic-lo-hi.h>
0079 #include <linux/slab.h>
0080 #include <linux/prefetch.h>
0081 #include <net/tcp.h>
0082 #include <net/checksum.h>
0083 
0084 #include <asm/div64.h>
0085 #include <asm/irq.h>
0086 
0087 /* local include */
0088 #include "s2io.h"
0089 #include "s2io-regs.h"
0090 
#define DRV_VERSION "2.0.26.28"

/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

/* Per-mode Rx descriptor geometry, indexed by nic->rxd_mode
 * (RXD_MODE_1 vs RXD_MODE_3B — see init_shared_mem()):
 *   rxd_size[]  - size in bytes of a single RxD in that mode.
 *   rxd_count[] - number of RxDs carried by one Rx block in that mode.
 */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
0099 
0100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
0101 {
0102     int ret;
0103 
0104     ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
0105            (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
0106 
0107     return ret;
0108 }
0109 
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 *
 * Evaluates to 1 only for XFRAME_I_DEVICE with a subsystem id in the
 * ranges 0x600B-0x600D or 0x640B-0x640D, else 0. Arguments and the
 * whole expansion are parenthesized so the macro is safe inside larger
 * expressions and with non-trivial argument expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)      \
    (((dev_type) == XFRAME_I_DEVICE) ?              \
     (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||      \
       (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* True when neither the remote-fault nor local-fault RMAC bits are set
 * in the adapter-status register value @val64.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
0122 
0123 static inline int is_s2io_card_up(const struct s2io_nic *sp)
0124 {
0125     return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
0126 }
0127 
/* Ethtool related variables and Macros. */

/* Names of the adapter self-tests, reported to ethtool. The "(offline)"
 * / "(online)" suffixes indicate whether the test can run while the
 * interface carries traffic.
 */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
    "Register test\t(offline)",
    "Eeprom test\t(offline)",
    "Link test\t(online)",
    "RLDRAM test\t(offline)",
    "BIST Test\t(offline)"
};

/* Ethtool statistics key names common to all Xena-class (Xframe I/II)
 * adapters. Order must match the order in which the corresponding
 * counters are emitted by the stats-gathering code.
 */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
    {"tmac_frms"},
    {"tmac_data_octets"},
    {"tmac_drop_frms"},
    {"tmac_mcst_frms"},
    {"tmac_bcst_frms"},
    {"tmac_pause_ctrl_frms"},
    {"tmac_ttl_octets"},
    {"tmac_ucst_frms"},
    {"tmac_nucst_frms"},
    {"tmac_any_err_frms"},
    {"tmac_ttl_less_fb_octets"},
    {"tmac_vld_ip_octets"},
    {"tmac_vld_ip"},
    {"tmac_drop_ip"},
    {"tmac_icmp"},
    {"tmac_rst_tcp"},
    {"tmac_tcp"},
    {"tmac_udp"},
    {"rmac_vld_frms"},
    {"rmac_data_octets"},
    {"rmac_fcs_err_frms"},
    {"rmac_drop_frms"},
    {"rmac_vld_mcst_frms"},
    {"rmac_vld_bcst_frms"},
    {"rmac_in_rng_len_err_frms"},
    {"rmac_out_rng_len_err_frms"},
    {"rmac_long_frms"},
    {"rmac_pause_ctrl_frms"},
    {"rmac_unsup_ctrl_frms"},
    {"rmac_ttl_octets"},
    {"rmac_accepted_ucst_frms"},
    {"rmac_accepted_nucst_frms"},
    {"rmac_discarded_frms"},
    {"rmac_drop_events"},
    {"rmac_ttl_less_fb_octets"},
    {"rmac_ttl_frms"},
    {"rmac_usized_frms"},
    {"rmac_osized_frms"},
    {"rmac_frag_frms"},
    {"rmac_jabber_frms"},
    {"rmac_ttl_64_frms"},
    {"rmac_ttl_65_127_frms"},
    {"rmac_ttl_128_255_frms"},
    {"rmac_ttl_256_511_frms"},
    {"rmac_ttl_512_1023_frms"},
    {"rmac_ttl_1024_1518_frms"},
    {"rmac_ip"},
    {"rmac_ip_octets"},
    {"rmac_hdr_err_ip"},
    {"rmac_drop_ip"},
    {"rmac_icmp"},
    {"rmac_tcp"},
    {"rmac_udp"},
    {"rmac_err_drp_udp"},
    {"rmac_xgmii_err_sym"},
    {"rmac_frms_q0"},
    {"rmac_frms_q1"},
    {"rmac_frms_q2"},
    {"rmac_frms_q3"},
    {"rmac_frms_q4"},
    {"rmac_frms_q5"},
    {"rmac_frms_q6"},
    {"rmac_frms_q7"},
    {"rmac_full_q0"},
    {"rmac_full_q1"},
    {"rmac_full_q2"},
    {"rmac_full_q3"},
    {"rmac_full_q4"},
    {"rmac_full_q5"},
    {"rmac_full_q6"},
    {"rmac_full_q7"},
    {"rmac_pause_cnt"},
    {"rmac_xgmii_data_err_cnt"},
    {"rmac_xgmii_ctrl_err_cnt"},
    {"rmac_accepted_ip"},
    {"rmac_err_tcp"},
    {"rd_req_cnt"},
    {"new_rd_req_cnt"},
    {"new_rd_req_rtry_cnt"},
    {"rd_rtry_cnt"},
    {"wr_rtry_rd_ack_cnt"},
    {"wr_req_cnt"},
    {"new_wr_req_cnt"},
    {"new_wr_req_rtry_cnt"},
    {"wr_rtry_cnt"},
    {"wr_disc_cnt"},
    {"rd_rtry_wr_ack_cnt"},
    {"txp_wr_cnt"},
    {"txd_rd_cnt"},
    {"txd_wr_cnt"},
    {"rxd_rd_cnt"},
    {"rxd_wr_cnt"},
    {"txf_rd_cnt"},
    {"rxf_wr_cnt"}
};

/* Additional statistics key names available only on the newer
 * (Xframe II / "enhanced") hardware — appended after the Xena keys.
 */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
    {"rmac_ttl_1519_4095_frms"},
    {"rmac_ttl_4096_8191_frms"},
    {"rmac_ttl_8192_max_frms"},
    {"rmac_ttl_gt_max_frms"},
    {"rmac_osized_alt_frms"},
    {"rmac_jabber_alt_frms"},
    {"rmac_gt_max_alt_frms"},
    {"rmac_vlan_frms"},
    {"rmac_len_discard"},
    {"rmac_fcs_discard"},
    {"rmac_pf_discard"},
    {"rmac_da_discard"},
    {"rmac_red_discard"},
    {"rmac_rts_discard"},
    {"rmac_ingm_full_discard"},
    {"link_fault_cnt"}
};

/* Key names for software (driver-maintained) counters. The first entry
 * is a section header deliberately containing a newline so ethtool
 * output visually separates driver stats from hardware stats.
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
    {"\n DRIVER STATISTICS"},
    {"single_bit_ecc_errs"},
    {"double_bit_ecc_errs"},
    {"parity_err_cnt"},
    {"serious_err_cnt"},
    {"soft_reset_cnt"},
    {"fifo_full_cnt"},
    {"ring_0_full_cnt"},
    {"ring_1_full_cnt"},
    {"ring_2_full_cnt"},
    {"ring_3_full_cnt"},
    {"ring_4_full_cnt"},
    {"ring_5_full_cnt"},
    {"ring_6_full_cnt"},
    {"ring_7_full_cnt"},
    {"alarm_transceiver_temp_high"},
    {"alarm_transceiver_temp_low"},
    {"alarm_laser_bias_current_high"},
    {"alarm_laser_bias_current_low"},
    {"alarm_laser_output_power_high"},
    {"alarm_laser_output_power_low"},
    {"warn_transceiver_temp_high"},
    {"warn_transceiver_temp_low"},
    {"warn_laser_bias_current_high"},
    {"warn_laser_bias_current_low"},
    {"warn_laser_output_power_high"},
    {"warn_laser_output_power_low"},
    {"lro_aggregated_pkts"},
    {"lro_flush_both_count"},
    {"lro_out_of_sequence_pkts"},
    {"lro_flush_due_to_max_pkts"},
    {"lro_avg_aggr_pkts"},
    {"mem_alloc_fail_cnt"},
    {"pci_map_fail_cnt"},
    {"watchdog_timer_cnt"},
    {"mem_allocated"},
    {"mem_freed"},
    {"link_up_cnt"},
    {"link_down_cnt"},
    {"link_up_time"},
    {"link_down_time"},
    {"tx_tcode_buf_abort_cnt"},
    {"tx_tcode_desc_abort_cnt"},
    {"tx_tcode_parity_err_cnt"},
    {"tx_tcode_link_loss_cnt"},
    {"tx_tcode_list_proc_err_cnt"},
    {"rx_tcode_parity_err_cnt"},
    {"rx_tcode_abort_cnt"},
    {"rx_tcode_parity_abort_cnt"},
    {"rx_tcode_rda_fail_cnt"},
    {"rx_tcode_unkn_prot_cnt"},
    {"rx_tcode_fcs_err_cnt"},
    {"rx_tcode_buf_size_err_cnt"},
    {"rx_tcode_rxd_corrupt_cnt"},
    {"rx_tcode_unkn_err_cnt"},
    {"tda_err_cnt"},
    {"pfc_err_cnt"},
    {"pcc_err_cnt"},
    {"tti_err_cnt"},
    {"tpa_err_cnt"},
    {"sm_err_cnt"},
    {"lso_err_cnt"},
    {"mac_tmac_err_cnt"},
    {"mac_rmac_err_cnt"},
    {"xgxs_txgxs_err_cnt"},
    {"xgxs_rxgxs_err_cnt"},
    {"rc_err_cnt"},
    {"prc_pcix_err_cnt"},
    {"rpa_err_cnt"},
    {"rda_err_cnt"},
    {"rti_err_cnt"},
    {"mc_err_cnt"}
};

/* Entry counts of the three key tables above. */
#define S2IO_XENA_STAT_LEN  ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)

/* Total stats exposed per device type: Xframe I = hardware (Xena) +
 * driver stats; Xframe II additionally gets the enhanced stats.
 */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Byte sizes of the flattened string tables handed to ethtool. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN    (S2IO_TEST_LEN * ETH_GSTRING_LEN)
0340 
0341 /* copy mac addr to def_mac_addr array */
0342 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
0343 {
0344     sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
0345     sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
0346     sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
0347     sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
0348     sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
0349     sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
0350 }
0351 
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Sentinel terminating the register-write tables below. */
#define END_SIGN    0x0

/* DTX configuration sequence for Hercules (Xframe II) adapters:
 * alternating "set address" / "write data" command pairs, terminated
 * by END_SIGN. Consumed by the XAUI initialization code.
 */
static const u64 herc_act_dtx_cfg[] = {
    /* Set address */
    0x8000051536750000ULL, 0x80000515367500E0ULL,
    /* Write data */
    0x8000051536750004ULL, 0x80000515367500E4ULL,
    /* Set address */
    0x80010515003F0000ULL, 0x80010515003F00E0ULL,
    /* Write data */
    0x80010515003F0004ULL, 0x80010515003F00E4ULL,
    /* Set address */
    0x801205150D440000ULL, 0x801205150D4400E0ULL,
    /* Write data */
    0x801205150D440004ULL, 0x801205150D4400E4ULL,
    /* Set address */
    0x80020515F2100000ULL, 0x80020515F21000E0ULL,
    /* Write data */
    0x80020515F2100004ULL, 0x80020515F21000E4ULL,
    /* Done */
    END_SIGN
};

/* Equivalent DTX configuration sequence for Xena (Xframe I) adapters,
 * same set-address / write-data pairing and END_SIGN terminator.
 */
static const u64 xena_dtx_cfg[] = {
    /* Set address */
    0x8000051500000000ULL, 0x80000515000000E0ULL,
    /* Write data */
    0x80000515D9350004ULL, 0x80000515D93500E4ULL,
    /* Set address */
    0x8001051500000000ULL, 0x80010515000000E0ULL,
    /* Write data */
    0x80010515001E0004ULL, 0x80010515001E00E4ULL,
    /* Set address */
    0x8002051500000000ULL, 0x80020515000000E0ULL,
    /* Write data */
    0x80020515F2100004ULL, 0x80020515F21000E4ULL,
    END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
    0x0060000000000000ULL, 0x0060600000000000ULL,
    0x0040600000000000ULL, 0x0000600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0000600000000000ULL,
    0x0040600000000000ULL, 0x0060600000000000ULL,
    END_SIGN
};
0416 
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
/* S2IO_PARM_INT declares an int module parameter with a default
 * (see s2io.h). Semantics of the user-visible knobs are documented
 * in the header comment at the top of this file.
 */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO / per-ring array parameters with their defaults; the
 * designated-initializer ranges fill every element.
 */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

/* Expose the arrays above as module parameters (no permission bits:
 * not visible in sysfs).
 */
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
0461 
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static const struct pci_device_id s2io_tbl[] = {
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
     PCI_ANY_ID, PCI_ANY_ID},
    {0,}    /* terminating entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error-recovery callbacks (AER); handlers are defined later in
 * this file.
 */
static const struct pci_error_handlers s2io_err_handler = {
    .error_detected = s2io_io_error_detected,
    .slot_reset = s2io_io_slot_reset,
    .resume = s2io_io_resume,
};

/* PCI driver registration glue; probe/remove are defined later in
 * this file.
 */
static struct pci_driver s2io_driver = {
    .name = "S2IO",
    .id_table = s2io_tbl,
    .probe = s2io_init_nic,
    .remove = s2io_rem_nic,
    .err_handler = &s2io_err_handler,
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
/* Number of pages needed to hold @len items at @per_each items per page. */
#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
0496 
0497 /* netqueue manipulation helper functions */
0498 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
0499 {
0500     if (!sp->config.multiq) {
0501         int i;
0502 
0503         for (i = 0; i < sp->config.tx_fifo_num; i++)
0504             sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
0505     }
0506     netif_tx_stop_all_queues(sp->dev);
0507 }
0508 
0509 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
0510 {
0511     if (!sp->config.multiq)
0512         sp->mac_control.fifos[fifo_no].queue_state =
0513             FIFO_QUEUE_STOP;
0514 
0515     netif_tx_stop_all_queues(sp->dev);
0516 }
0517 
0518 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
0519 {
0520     if (!sp->config.multiq) {
0521         int i;
0522 
0523         for (i = 0; i < sp->config.tx_fifo_num; i++)
0524             sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
0525     }
0526     netif_tx_start_all_queues(sp->dev);
0527 }
0528 
0529 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
0530 {
0531     if (!sp->config.multiq) {
0532         int i;
0533 
0534         for (i = 0; i < sp->config.tx_fifo_num; i++)
0535             sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
0536     }
0537     netif_tx_wake_all_queues(sp->dev);
0538 }
0539 
0540 static inline void s2io_wake_tx_queue(
0541     struct fifo_info *fifo, int cnt, u8 multiq)
0542 {
0543 
0544     if (multiq) {
0545         if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
0546             netif_wake_subqueue(fifo->dev, fifo->fifo_no);
0547     } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
0548         if (netif_queue_stopped(fifo->dev)) {
0549             fifo->queue_state = FIFO_QUEUE_START;
0550             netif_wake_queue(fifo->dev);
0551         }
0552     }
0553 }
0554 
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 *
 * Return: SUCCESS on success; FAILURE, -EINVAL or -ENOMEM on error.
 * NOTE(review): the error paths mix FAILURE with negative errnos —
 * confirm callers treat any non-SUCCESS value uniformly. On failure,
 * partially-allocated memory is expected to be reclaimed by
 * free_shared_mem() (see comments at the failure sites).
 */

static int init_shared_mem(struct s2io_nic *nic)
{
    u32 size;
    void *tmp_v_addr, *tmp_v_addr_next;
    dma_addr_t tmp_p_addr, tmp_p_addr_next;
    struct RxD_block *pre_rxd_blk = NULL;
    int i, j, blk_cnt;
    int lst_size, lst_per_page;
    struct net_device *dev = nic->dev;
    unsigned long tmp;
    struct buffAdd *ba;
    struct config_param *config = &nic->config;
    struct mac_info *mac_control = &nic->mac_control;
    /* Running byte total; folded into sw_stat.mem_allocated at the end. */
    unsigned long long mem_allocated = 0;

    /* Allocation and initialization of TXDLs in FIFOs */
    /* First pass: reject configurations requesting more TxDs in total
     * than the hardware supports.
     */
    size = 0;
    for (i = 0; i < config->tx_fifo_num; i++) {
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        size += tx_cfg->fifo_len;
    }
    if (size > MAX_AVAILABLE_TXDS) {
        DBG_PRINT(ERR_DBG,
              "Too many TxDs requested: %d, max supported: %d\n",
              size, MAX_AVAILABLE_TXDS);
        return -EINVAL;
    }

    /* Second pass: validate each individual FIFO length. */
    size = 0;
    for (i = 0; i < config->tx_fifo_num; i++) {
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        size = tx_cfg->fifo_len;
        /*
         * Legal values are from 2 to 8192
         */
        if (size < 2) {
            DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
                  "Valid lengths are 2 through 8192\n",
                  i, size);
            return -EINVAL;
        }
    }

    /* Size of one Tx descriptor list and how many fit in a page. */
    lst_size = (sizeof(struct TxD) * config->max_txds);
    lst_per_page = PAGE_SIZE / lst_size;

    /* Per-FIFO bookkeeping array: one list_info_hold per TxDL. */
    for (i = 0; i < config->tx_fifo_num; i++) {
        struct fifo_info *fifo = &mac_control->fifos[i];
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
        int fifo_len = tx_cfg->fifo_len;
        int list_holder_size = fifo_len * sizeof(struct list_info_hold);

        fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
        if (!fifo->list_info) {
            DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
            return -ENOMEM;
        }
        mem_allocated += list_holder_size;
    }
    /* Allocate the DMA-coherent pages backing each FIFO's TxDLs and
     * record each TxDL's virtual/physical address in list_info.
     */
    for (i = 0; i < config->tx_fifo_num; i++) {
        int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                        lst_per_page);
        struct fifo_info *fifo = &mac_control->fifos[i];
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        fifo->tx_curr_put_info.offset = 0;
        fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
        fifo->tx_curr_get_info.offset = 0;
        fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
        fifo->fifo_no = i;
        fifo->nic = nic;
        /* One TxD per fragment plus header/trailer descriptors. */
        fifo->max_txds = MAX_SKB_FRAGS + 2;
        fifo->dev = dev;

        for (j = 0; j < page_num; j++) {
            int k = 0;
            dma_addr_t tmp_p;
            void *tmp_v;
            tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
                           &tmp_p, GFP_KERNEL);
            if (!tmp_v) {
                DBG_PRINT(INFO_DBG,
                      "dma_alloc_coherent failed for TxDL\n");
                return -ENOMEM;
            }
            /* If we got a zero DMA address(can happen on
             * certain platforms like PPC), reallocate.
             * Store virtual address of page we don't want,
             * to be freed later.
             */
            if (!tmp_p) {
                mac_control->zerodma_virt_addr = tmp_v;
                DBG_PRINT(INIT_DBG,
                      "%s: Zero DMA address for TxDL. "
                      "Virtual address %p\n",
                      dev->name, tmp_v);
                tmp_v = dma_alloc_coherent(&nic->pdev->dev,
                               PAGE_SIZE, &tmp_p,
                               GFP_KERNEL);
                if (!tmp_v) {
                    DBG_PRINT(INFO_DBG,
                          "dma_alloc_coherent failed for TxDL\n");
                    return -ENOMEM;
                }
                mem_allocated += PAGE_SIZE;
            }
            /* Slice the page into lst_per_page TxDLs, stopping
             * early at the FIFO's configured length.
             */
            while (k < lst_per_page) {
                int l = (j * lst_per_page) + k;
                if (l == tx_cfg->fifo_len)
                    break;
                fifo->list_info[l].list_virt_addr =
                    tmp_v + (k * lst_size);
                fifo->list_info[l].list_phy_addr =
                    tmp_p + (k * lst_size);
                k++;
            }
        }
    }

    /* Per-FIFO scratch array (one u64 per TxD). */
    for (i = 0; i < config->tx_fifo_num; i++) {
        struct fifo_info *fifo = &mac_control->fifos[i];
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        size = tx_cfg->fifo_len;
        fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
        if (!fifo->ufo_in_band_v)
            return -ENOMEM;
        mem_allocated += (size * sizeof(u64));
    }

    /* Allocation and initialization of RXDs in Rings */
    /* Each ring's RxD count must be a whole number of Rx blocks
     * (rxd_count[mode] usable RxDs + 1 link descriptor per block).
     */
    size = 0;
    for (i = 0; i < config->rx_ring_num; i++) {
        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
        struct ring_info *ring = &mac_control->rings[i];

        if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
            DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
                  "multiple of RxDs per Block\n",
                  dev->name, i);
            return FAILURE;
        }
        size += rx_cfg->num_rxd;
        ring->block_count = rx_cfg->num_rxd /
            (rxd_count[nic->rxd_mode] + 1);
        ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
    }
    if (nic->rxd_mode == RXD_MODE_1)
        size = (size * (sizeof(struct RxD1)));
    else
        size = (size * (sizeof(struct RxD3)));

    for (i = 0; i < config->rx_ring_num; i++) {
        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
        struct ring_info *ring = &mac_control->rings[i];

        ring->rx_curr_get_info.block_index = 0;
        ring->rx_curr_get_info.offset = 0;
        ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
        ring->rx_curr_put_info.block_index = 0;
        ring->rx_curr_put_info.offset = 0;
        ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
        ring->nic = nic;
        ring->ring_no = i;

        blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
        /*  Allocating all the Rx blocks */
        for (j = 0; j < blk_cnt; j++) {
            struct rx_block_info *rx_blocks;
            int l;

            rx_blocks = &ring->rx_blocks[j];
            size = SIZE_OF_BLOCK;   /* size is always page size */
            tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
                            &tmp_p_addr, GFP_KERNEL);
            if (tmp_v_addr == NULL) {
                /*
                 * In case of failure, free_shared_mem()
                 * is called, which should free any
                 * memory that was alloced till the
                 * failure happened.
                 */
                rx_blocks->block_virt_addr = tmp_v_addr;
                return -ENOMEM;
            }
            mem_allocated += size;

            /* Shadow table mapping each RxD inside the block to
             * its virtual and DMA addresses.
             */
            size = sizeof(struct rxd_info) *
                rxd_count[nic->rxd_mode];
            rx_blocks->block_virt_addr = tmp_v_addr;
            rx_blocks->block_dma_addr = tmp_p_addr;
            rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
            if (!rx_blocks->rxds)
                return -ENOMEM;
            mem_allocated += size;
            for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
                rx_blocks->rxds[l].virt_addr =
                    rx_blocks->block_virt_addr +
                    (rxd_size[nic->rxd_mode] * l);
                rx_blocks->rxds[l].dma_addr =
                    rx_blocks->block_dma_addr +
                    (rxd_size[nic->rxd_mode] * l);
            }
        }
        /* Interlinking all Rx Blocks */
        /* Chain the blocks into a ring: each block's trailer points
         * at the next block (wrapping at blk_cnt) by both virtual
         * and physical address.
         */
        for (j = 0; j < blk_cnt; j++) {
            int next = (j + 1) % blk_cnt;
            tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
            tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
            tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
            tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

            pre_rxd_blk = tmp_v_addr;
            pre_rxd_blk->reserved_2_pNext_RxD_block =
                (unsigned long)tmp_v_addr_next;
            pre_rxd_blk->pNext_RxD_Blk_physical =
                (u64)tmp_p_addr_next;
        }
    }
    if (nic->rxd_mode == RXD_MODE_3B) {
        /*
         * Allocation of Storages for buffer addresses in 2BUFF mode
         * and the buffers as well.
         */
        for (i = 0; i < config->rx_ring_num; i++) {
            struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
            struct ring_info *ring = &mac_control->rings[i];

            blk_cnt = rx_cfg->num_rxd /
                (rxd_count[nic->rxd_mode] + 1);
            size = sizeof(struct buffAdd *) * blk_cnt;
            ring->ba = kmalloc(size, GFP_KERNEL);
            if (!ring->ba)
                return -ENOMEM;
            mem_allocated += size;
            for (j = 0; j < blk_cnt; j++) {
                int k = 0;

                size = sizeof(struct buffAdd) *
                    (rxd_count[nic->rxd_mode] + 1);
                ring->ba[j] = kmalloc(size, GFP_KERNEL);
                if (!ring->ba[j])
                    return -ENOMEM;
                mem_allocated += size;
                while (k != rxd_count[nic->rxd_mode]) {
                    ba = &ring->ba[j][k];
                    /* Over-allocate by ALIGN_SIZE, then round
                     * the usable pointer (ba_0/ba_1) up to an
                     * ALIGN_SIZE boundary; *_org keeps the
                     * original pointer for kfree().
                     */
                    size = BUF0_LEN + ALIGN_SIZE;
                    ba->ba_0_org = kmalloc(size, GFP_KERNEL);
                    if (!ba->ba_0_org)
                        return -ENOMEM;
                    mem_allocated += size;
                    tmp = (unsigned long)ba->ba_0_org;
                    tmp += ALIGN_SIZE;
                    tmp &= ~((unsigned long)ALIGN_SIZE);
                    ba->ba_0 = (void *)tmp;

                    size = BUF1_LEN + ALIGN_SIZE;
                    ba->ba_1_org = kmalloc(size, GFP_KERNEL);
                    if (!ba->ba_1_org)
                        return -ENOMEM;
                    mem_allocated += size;
                    tmp = (unsigned long)ba->ba_1_org;
                    tmp += ALIGN_SIZE;
                    tmp &= ~((unsigned long)ALIGN_SIZE);
                    ba->ba_1 = (void *)tmp;
                    k++;
                }
            }
        }
    }

    /* Allocation and initialization of Statistics block */
    size = sizeof(struct stat_block);
    mac_control->stats_mem =
        dma_alloc_coherent(&nic->pdev->dev, size,
                   &mac_control->stats_mem_phy, GFP_KERNEL);

    if (!mac_control->stats_mem) {
        /*
         * In case of failure, free_shared_mem() is called, which
         * should free any memory that was alloced till the
         * failure happened.
         */
        return -ENOMEM;
    }
    mem_allocated += size;
    mac_control->stats_mem_sz = size;

    tmp_v_addr = mac_control->stats_mem;
    mac_control->stats_info = tmp_v_addr;
    memset(tmp_v_addr, 0, size);
    /* NOTE(review): tmp_p_addr here still holds the DMA address of the
     * last interlinked Rx block (or is uninitialized if no Rx blocks
     * were created) — confirm the intended value for this message.
     */
    DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
        dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
    mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
    return SUCCESS;
}
0861 
0862 /**
0863  * free_shared_mem - Free the allocated Memory
0864  * @nic:  Device private variable.
0865  * Description: This function is to free all memory locations allocated by
0866  * the init_shared_mem() function and return it to the kernel.
0867  */
0868 
static void free_shared_mem(struct s2io_nic *nic)
{
    int i, j, blk_cnt, size;
    void *tmp_v_addr;
    dma_addr_t tmp_p_addr;
    int lst_size, lst_per_page;
    struct net_device *dev;
    int page_num = 0;
    struct config_param *config;
    struct mac_info *mac_control;
    struct stat_block *stats;
    struct swStat *swstats;

    if (!nic)
        return;

    dev = nic->dev;

    config = &nic->config;
    mac_control = &nic->mac_control;
    stats = mac_control->stats_info;
    /* NOTE(review): if init_shared_mem() failed before the statistics
     * block was allocated, stats_info may be NULL and the
     * swstats->mem_freed updates below would dereference a NULL-based
     * pointer — confirm callers guarantee stats_info is valid here.
     */
    swstats = &stats->sw_stat;

    /* Mirror of the TxDL allocation: one PAGE_SIZE coherent buffer
     * per group of lst_per_page descriptor lists.
     */
    lst_size = sizeof(struct TxD) * config->max_txds;
    lst_per_page = PAGE_SIZE / lst_size;

    for (i = 0; i < config->tx_fifo_num; i++) {
        struct fifo_info *fifo = &mac_control->fifos[i];
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
        for (j = 0; j < page_num; j++) {
            int mem_blks = (j * lst_per_page);
            struct list_info_hold *fli;

            /* Allocation stopped before this fifo's list_info
             * existed; nothing further can have been allocated.
             * NOTE(review): this early return also skips the Rx
             * block and stats-memory teardown below — verify that
             * is intentional for the partial-allocation path.
             */
            if (!fifo->list_info)
                return;

            fli = &fifo->list_info[mem_blks];
            /* A NULL virt addr marks the first unallocated page. */
            if (!fli->list_virt_addr)
                break;
            dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
                      fli->list_virt_addr,
                      fli->list_phy_addr);
            swstats->mem_freed += PAGE_SIZE;
        }
        /* If we got a zero DMA address during allocation,
         * free the page now
         */
        if (mac_control->zerodma_virt_addr) {
            dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
                      mac_control->zerodma_virt_addr,
                      (dma_addr_t)0);
            DBG_PRINT(INIT_DBG,
                  "%s: Freeing TxDL with zero DMA address. "
                  "Virtual address %p\n",
                  dev->name, mac_control->zerodma_virt_addr);
            swstats->mem_freed += PAGE_SIZE;
        }
        kfree(fifo->list_info);
        swstats->mem_freed += tx_cfg->fifo_len *
            sizeof(struct list_info_hold);
    }

    /* Free the Rx descriptor blocks and their per-block rxd
     * bookkeeping arrays.
     */
    size = SIZE_OF_BLOCK;
    for (i = 0; i < config->rx_ring_num; i++) {
        struct ring_info *ring = &mac_control->rings[i];

        blk_cnt = ring->block_count;
        for (j = 0; j < blk_cnt; j++) {
            tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
            tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
            /* First NULL block marks the end of what was
             * successfully allocated.
             */
            if (tmp_v_addr == NULL)
                break;
            dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
                      tmp_p_addr);
            swstats->mem_freed += size;
            kfree(ring->rx_blocks[j].rxds);
            swstats->mem_freed += sizeof(struct rxd_info) *
                rxd_count[nic->rxd_mode];
        }
    }

    if (nic->rxd_mode == RXD_MODE_3B) {
        /* Freeing buffer storage addresses in 2BUFF mode. */
        for (i = 0; i < config->rx_ring_num; i++) {
            struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
            struct ring_info *ring = &mac_control->rings[i];

            /* Same blk_cnt formula used at allocation time. */
            blk_cnt = rx_cfg->num_rxd /
                (rxd_count[nic->rxd_mode] + 1);
            /* NOTE(review): ring->ba itself is not NULL-checked;
             * if allocation failed before this ring's ba array was
             * created, ring->ba[j] below dereferences NULL —
             * confirm rings are zero-initialized and this path is
             * unreachable in that case.
             */
            for (j = 0; j < blk_cnt; j++) {
                int k = 0;
                if (!ring->ba[j])
                    continue;
                while (k != rxd_count[nic->rxd_mode]) {
                    struct buffAdd *ba = &ring->ba[j][k];
                    /* Free the unaligned originals; ba_0/ba_1
                     * were aligned views into these.
                     */
                    kfree(ba->ba_0_org);
                    swstats->mem_freed +=
                        BUF0_LEN + ALIGN_SIZE;
                    kfree(ba->ba_1_org);
                    swstats->mem_freed +=
                        BUF1_LEN + ALIGN_SIZE;
                    k++;
                }
                kfree(ring->ba[j]);
                swstats->mem_freed += sizeof(struct buffAdd) *
                    (rxd_count[nic->rxd_mode] + 1);
            }
            kfree(ring->ba);
            swstats->mem_freed += sizeof(struct buffAdd *) *
                blk_cnt;
        }
    }

    /* Release the per-fifo UFO in-band buffers, if allocated. */
    for (i = 0; i < nic->config.tx_fifo_num; i++) {
        struct fifo_info *fifo = &mac_control->fifos[i];
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        if (fifo->ufo_in_band_v) {
            swstats->mem_freed += tx_cfg->fifo_len *
                sizeof(u64);
            kfree(fifo->ufo_in_band_v);
        }
    }

    /* Finally, return the statistics block itself. */
    if (mac_control->stats_mem) {
        swstats->mem_freed += mac_control->stats_mem_sz;
        dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
                  mac_control->stats_mem,
                  mac_control->stats_mem_phy);
    }
}
1002 
1003 /*
1004  * s2io_verify_pci_mode -
1005  */
1006 
1007 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1008 {
1009     struct XENA_dev_config __iomem *bar0 = nic->bar0;
1010     register u64 val64 = 0;
1011     int     mode;
1012 
1013     val64 = readq(&bar0->pci_mode);
1014     mode = (u8)GET_PCI_MODE(val64);
1015 
1016     if (val64 & PCI_MODE_UNKNOWN_MODE)
1017         return -1;      /* Unknown PCI mode */
1018     return mode;
1019 }
1020 
1021 #define NEC_VENID   0x1033
1022 #define NEC_DEVID   0x0125
1023 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024 {
1025     struct pci_dev *tdev = NULL;
1026     for_each_pci_dev(tdev) {
1027         if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1028             if (tdev->bus == s2io_pdev->bus->parent) {
1029                 pci_dev_put(tdev);
1030                 return 1;
1031             }
1032         }
1033     }
1034     return 0;
1035 }
1036 
1037 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1038 /*
1039  * s2io_print_pci_mode -
1040  */
1041 static int s2io_print_pci_mode(struct s2io_nic *nic)
1042 {
1043     struct XENA_dev_config __iomem *bar0 = nic->bar0;
1044     register u64 val64 = 0;
1045     int mode;
1046     struct config_param *config = &nic->config;
1047     const char *pcimode;
1048 
1049     val64 = readq(&bar0->pci_mode);
1050     mode = (u8)GET_PCI_MODE(val64);
1051 
1052     if (val64 & PCI_MODE_UNKNOWN_MODE)
1053         return -1;  /* Unknown PCI mode */
1054 
1055     config->bus_speed = bus_speed[mode];
1056 
1057     if (s2io_on_nec_bridge(nic->pdev)) {
1058         DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1059               nic->dev->name);
1060         return mode;
1061     }
1062 
1063     switch (mode) {
1064     case PCI_MODE_PCI_33:
1065         pcimode = "33MHz PCI bus";
1066         break;
1067     case PCI_MODE_PCI_66:
1068         pcimode = "66MHz PCI bus";
1069         break;
1070     case PCI_MODE_PCIX_M1_66:
1071         pcimode = "66MHz PCIX(M1) bus";
1072         break;
1073     case PCI_MODE_PCIX_M1_100:
1074         pcimode = "100MHz PCIX(M1) bus";
1075         break;
1076     case PCI_MODE_PCIX_M1_133:
1077         pcimode = "133MHz PCIX(M1) bus";
1078         break;
1079     case PCI_MODE_PCIX_M2_66:
1080         pcimode = "133MHz PCIX(M2) bus";
1081         break;
1082     case PCI_MODE_PCIX_M2_100:
1083         pcimode = "200MHz PCIX(M2) bus";
1084         break;
1085     case PCI_MODE_PCIX_M2_133:
1086         pcimode = "266MHz PCIX(M2) bus";
1087         break;
1088     default:
1089         pcimode = "unsupported bus!";
1090         mode = -1;
1091     }
1092 
1093     DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094           nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095 
1096     return mode;
1097 }
1098 
1099 /**
1100  *  init_tti - Initialization transmit traffic interrupt scheme
1101  *  @nic: device private variable
1102  *  @link: link status (UP/DOWN) used to enable/disable continuous
1103  *  transmit interrupts
1104  *  @may_sleep: parameter indicates if sleeping when waiting for
1105  *  command complete
1106  *  Description: The function configures transmit traffic interrupts
1107  *  Return Value:  SUCCESS on success and
1108  *  '-1' on failure
1109  */
1110 
1111 static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
1112 {
1113     struct XENA_dev_config __iomem *bar0 = nic->bar0;
1114     register u64 val64 = 0;
1115     int i;
1116     struct config_param *config = &nic->config;
1117 
1118     for (i = 0; i < config->tx_fifo_num; i++) {
1119         /*
1120          * TTI Initialization. Default Tx timer gets us about
1121          * 250 interrupts per sec. Continuous interrupts are enabled
1122          * by default.
1123          */
1124         if (nic->device_type == XFRAME_II_DEVICE) {
1125             int count = (nic->config.bus_speed * 125)/2;
1126             val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1127         } else
1128             val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1129 
1130         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1131             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1132             TTI_DATA1_MEM_TX_URNG_C(0x30) |
1133             TTI_DATA1_MEM_TX_TIMER_AC_EN;
1134         if (i == 0)
1135             if (use_continuous_tx_intrs && (link == LINK_UP))
1136                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1137         writeq(val64, &bar0->tti_data1_mem);
1138 
1139         if (nic->config.intr_type == MSI_X) {
1140             val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1141                 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1142                 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1143                 TTI_DATA2_MEM_TX_UFC_D(0x300);
1144         } else {
1145             if ((nic->config.tx_steering_type ==
1146                  TX_DEFAULT_STEERING) &&
1147                 (config->tx_fifo_num > 1) &&
1148                 (i >= nic->udp_fifo_idx) &&
1149                 (i < (nic->udp_fifo_idx +
1150                   nic->total_udp_fifos)))
1151                 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1152                     TTI_DATA2_MEM_TX_UFC_B(0x80) |
1153                     TTI_DATA2_MEM_TX_UFC_C(0x100) |
1154                     TTI_DATA2_MEM_TX_UFC_D(0x120);
1155             else
1156                 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1157                     TTI_DATA2_MEM_TX_UFC_B(0x20) |
1158                     TTI_DATA2_MEM_TX_UFC_C(0x40) |
1159                     TTI_DATA2_MEM_TX_UFC_D(0x80);
1160         }
1161 
1162         writeq(val64, &bar0->tti_data2_mem);
1163 
1164         val64 = TTI_CMD_MEM_WE |
1165             TTI_CMD_MEM_STROBE_NEW_CMD |
1166             TTI_CMD_MEM_OFFSET(i);
1167         writeq(val64, &bar0->tti_command_mem);
1168 
1169         if (wait_for_cmd_complete(&bar0->tti_command_mem,
1170                       TTI_CMD_MEM_STROBE_NEW_CMD,
1171                       S2IO_BIT_RESET, may_sleep) != SUCCESS)
1172             return FAILURE;
1173     }
1174 
1175     return SUCCESS;
1176 }
1177 
1178 /**
1179  *  init_nic - Initialization of hardware
1180  *  @nic: device private variable
1181  *  Description: The function sequentially configures every block
1182  *  of the H/W from their reset values.
1183  *  Return Value:  SUCCESS on success and
1184  *  '-1' on failure (endian settings incorrect).
1185  */
1186 
1187 static int init_nic(struct s2io_nic *nic)
1188 {
1189     struct XENA_dev_config __iomem *bar0 = nic->bar0;
1190     struct net_device *dev = nic->dev;
1191     register u64 val64 = 0;
1192     void __iomem *add;
1193     u32 time;
1194     int i, j;
1195     int dtx_cnt = 0;
1196     unsigned long long mem_share;
1197     int mem_size;
1198     struct config_param *config = &nic->config;
1199     struct mac_info *mac_control = &nic->mac_control;
1200 
1201     /* to set the swapper controle on the card */
1202     if (s2io_set_swapper(nic)) {
1203         DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1204         return -EIO;
1205     }
1206 
1207     /*
1208      * Herc requires EOI to be removed from reset before XGXS, so..
1209      */
1210     if (nic->device_type & XFRAME_II_DEVICE) {
1211         val64 = 0xA500000000ULL;
1212         writeq(val64, &bar0->sw_reset);
1213         msleep(500);
1214         val64 = readq(&bar0->sw_reset);
1215     }
1216 
1217     /* Remove XGXS from reset state */
1218     val64 = 0;
1219     writeq(val64, &bar0->sw_reset);
1220     msleep(500);
1221     val64 = readq(&bar0->sw_reset);
1222 
1223     /* Ensure that it's safe to access registers by checking
1224      * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1225      */
1226     if (nic->device_type == XFRAME_II_DEVICE) {
1227         for (i = 0; i < 50; i++) {
1228             val64 = readq(&bar0->adapter_status);
1229             if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1230                 break;
1231             msleep(10);
1232         }
1233         if (i == 50)
1234             return -ENODEV;
1235     }
1236 
1237     /*  Enable Receiving broadcasts */
1238     add = &bar0->mac_cfg;
1239     val64 = readq(&bar0->mac_cfg);
1240     val64 |= MAC_RMAC_BCAST_ENABLE;
1241     writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1242     writel((u32)val64, add);
1243     writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1244     writel((u32) (val64 >> 32), (add + 4));
1245 
1246     /* Read registers in all blocks */
1247     val64 = readq(&bar0->mac_int_mask);
1248     val64 = readq(&bar0->mc_int_mask);
1249     val64 = readq(&bar0->xgxs_int_mask);
1250 
1251     /*  Set MTU */
1252     val64 = dev->mtu;
1253     writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1254 
1255     if (nic->device_type & XFRAME_II_DEVICE) {
1256         while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1257             SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1258                       &bar0->dtx_control, UF);
1259             if (dtx_cnt & 0x1)
1260                 msleep(1); /* Necessary!! */
1261             dtx_cnt++;
1262         }
1263     } else {
1264         while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1265             SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1266                       &bar0->dtx_control, UF);
1267             val64 = readq(&bar0->dtx_control);
1268             dtx_cnt++;
1269         }
1270     }
1271 
1272     /*  Tx DMA Initialization */
1273     val64 = 0;
1274     writeq(val64, &bar0->tx_fifo_partition_0);
1275     writeq(val64, &bar0->tx_fifo_partition_1);
1276     writeq(val64, &bar0->tx_fifo_partition_2);
1277     writeq(val64, &bar0->tx_fifo_partition_3);
1278 
1279     for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1280         struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1281 
1282         val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1283             vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1284 
1285         if (i == (config->tx_fifo_num - 1)) {
1286             if (i % 2 == 0)
1287                 i++;
1288         }
1289 
1290         switch (i) {
1291         case 1:
1292             writeq(val64, &bar0->tx_fifo_partition_0);
1293             val64 = 0;
1294             j = 0;
1295             break;
1296         case 3:
1297             writeq(val64, &bar0->tx_fifo_partition_1);
1298             val64 = 0;
1299             j = 0;
1300             break;
1301         case 5:
1302             writeq(val64, &bar0->tx_fifo_partition_2);
1303             val64 = 0;
1304             j = 0;
1305             break;
1306         case 7:
1307             writeq(val64, &bar0->tx_fifo_partition_3);
1308             val64 = 0;
1309             j = 0;
1310             break;
1311         default:
1312             j++;
1313             break;
1314         }
1315     }
1316 
1317     /*
1318      * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1319      * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1320      */
1321     if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1322         writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1323 
1324     val64 = readq(&bar0->tx_fifo_partition_0);
1325     DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1326           &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1327 
1328     /*
1329      * Initialization of Tx_PA_CONFIG register to ignore packet
1330      * integrity checking.
1331      */
1332     val64 = readq(&bar0->tx_pa_cfg);
1333     val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1334         TX_PA_CFG_IGNORE_SNAP_OUI |
1335         TX_PA_CFG_IGNORE_LLC_CTRL |
1336         TX_PA_CFG_IGNORE_L2_ERR;
1337     writeq(val64, &bar0->tx_pa_cfg);
1338 
1339     /* Rx DMA initialization. */
1340     val64 = 0;
1341     for (i = 0; i < config->rx_ring_num; i++) {
1342         struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1343 
1344         val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1345     }
1346     writeq(val64, &bar0->rx_queue_priority);
1347 
1348     /*
1349      * Allocating equal share of memory to all the
1350      * configured Rings.
1351      */
1352     val64 = 0;
1353     if (nic->device_type & XFRAME_II_DEVICE)
1354         mem_size = 32;
1355     else
1356         mem_size = 64;
1357 
1358     for (i = 0; i < config->rx_ring_num; i++) {
1359         switch (i) {
1360         case 0:
1361             mem_share = (mem_size / config->rx_ring_num +
1362                      mem_size % config->rx_ring_num);
1363             val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1364             continue;
1365         case 1:
1366             mem_share = (mem_size / config->rx_ring_num);
1367             val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1368             continue;
1369         case 2:
1370             mem_share = (mem_size / config->rx_ring_num);
1371             val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1372             continue;
1373         case 3:
1374             mem_share = (mem_size / config->rx_ring_num);
1375             val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1376             continue;
1377         case 4:
1378             mem_share = (mem_size / config->rx_ring_num);
1379             val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1380             continue;
1381         case 5:
1382             mem_share = (mem_size / config->rx_ring_num);
1383             val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1384             continue;
1385         case 6:
1386             mem_share = (mem_size / config->rx_ring_num);
1387             val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1388             continue;
1389         case 7:
1390             mem_share = (mem_size / config->rx_ring_num);
1391             val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1392             continue;
1393         }
1394     }
1395     writeq(val64, &bar0->rx_queue_cfg);
1396 
1397     /*
1398      * Filling Tx round robin registers
1399      * as per the number of FIFOs for equal scheduling priority
1400      */
1401     switch (config->tx_fifo_num) {
1402     case 1:
1403         val64 = 0x0;
1404         writeq(val64, &bar0->tx_w_round_robin_0);
1405         writeq(val64, &bar0->tx_w_round_robin_1);
1406         writeq(val64, &bar0->tx_w_round_robin_2);
1407         writeq(val64, &bar0->tx_w_round_robin_3);
1408         writeq(val64, &bar0->tx_w_round_robin_4);
1409         break;
1410     case 2:
1411         val64 = 0x0001000100010001ULL;
1412         writeq(val64, &bar0->tx_w_round_robin_0);
1413         writeq(val64, &bar0->tx_w_round_robin_1);
1414         writeq(val64, &bar0->tx_w_round_robin_2);
1415         writeq(val64, &bar0->tx_w_round_robin_3);
1416         val64 = 0x0001000100000000ULL;
1417         writeq(val64, &bar0->tx_w_round_robin_4);
1418         break;
1419     case 3:
1420         val64 = 0x0001020001020001ULL;
1421         writeq(val64, &bar0->tx_w_round_robin_0);
1422         val64 = 0x0200010200010200ULL;
1423         writeq(val64, &bar0->tx_w_round_robin_1);
1424         val64 = 0x0102000102000102ULL;
1425         writeq(val64, &bar0->tx_w_round_robin_2);
1426         val64 = 0x0001020001020001ULL;
1427         writeq(val64, &bar0->tx_w_round_robin_3);
1428         val64 = 0x0200010200000000ULL;
1429         writeq(val64, &bar0->tx_w_round_robin_4);
1430         break;
1431     case 4:
1432         val64 = 0x0001020300010203ULL;
1433         writeq(val64, &bar0->tx_w_round_robin_0);
1434         writeq(val64, &bar0->tx_w_round_robin_1);
1435         writeq(val64, &bar0->tx_w_round_robin_2);
1436         writeq(val64, &bar0->tx_w_round_robin_3);
1437         val64 = 0x0001020300000000ULL;
1438         writeq(val64, &bar0->tx_w_round_robin_4);
1439         break;
1440     case 5:
1441         val64 = 0x0001020304000102ULL;
1442         writeq(val64, &bar0->tx_w_round_robin_0);
1443         val64 = 0x0304000102030400ULL;
1444         writeq(val64, &bar0->tx_w_round_robin_1);
1445         val64 = 0x0102030400010203ULL;
1446         writeq(val64, &bar0->tx_w_round_robin_2);
1447         val64 = 0x0400010203040001ULL;
1448         writeq(val64, &bar0->tx_w_round_robin_3);
1449         val64 = 0x0203040000000000ULL;
1450         writeq(val64, &bar0->tx_w_round_robin_4);
1451         break;
1452     case 6:
1453         val64 = 0x0001020304050001ULL;
1454         writeq(val64, &bar0->tx_w_round_robin_0);
1455         val64 = 0x0203040500010203ULL;
1456         writeq(val64, &bar0->tx_w_round_robin_1);
1457         val64 = 0x0405000102030405ULL;
1458         writeq(val64, &bar0->tx_w_round_robin_2);
1459         val64 = 0x0001020304050001ULL;
1460         writeq(val64, &bar0->tx_w_round_robin_3);
1461         val64 = 0x0203040500000000ULL;
1462         writeq(val64, &bar0->tx_w_round_robin_4);
1463         break;
1464     case 7:
1465         val64 = 0x0001020304050600ULL;
1466         writeq(val64, &bar0->tx_w_round_robin_0);
1467         val64 = 0x0102030405060001ULL;
1468         writeq(val64, &bar0->tx_w_round_robin_1);
1469         val64 = 0x0203040506000102ULL;
1470         writeq(val64, &bar0->tx_w_round_robin_2);
1471         val64 = 0x0304050600010203ULL;
1472         writeq(val64, &bar0->tx_w_round_robin_3);
1473         val64 = 0x0405060000000000ULL;
1474         writeq(val64, &bar0->tx_w_round_robin_4);
1475         break;
1476     case 8:
1477         val64 = 0x0001020304050607ULL;
1478         writeq(val64, &bar0->tx_w_round_robin_0);
1479         writeq(val64, &bar0->tx_w_round_robin_1);
1480         writeq(val64, &bar0->tx_w_round_robin_2);
1481         writeq(val64, &bar0->tx_w_round_robin_3);
1482         val64 = 0x0001020300000000ULL;
1483         writeq(val64, &bar0->tx_w_round_robin_4);
1484         break;
1485     }
1486 
1487     /* Enable all configured Tx FIFO partitions */
1488     val64 = readq(&bar0->tx_fifo_partition_0);
1489     val64 |= (TX_FIFO_PARTITION_EN);
1490     writeq(val64, &bar0->tx_fifo_partition_0);
1491 
1492     /* Filling the Rx round robin registers as per the
1493      * number of Rings and steering based on QoS with
1494      * equal priority.
1495      */
1496     switch (config->rx_ring_num) {
1497     case 1:
1498         val64 = 0x0;
1499         writeq(val64, &bar0->rx_w_round_robin_0);
1500         writeq(val64, &bar0->rx_w_round_robin_1);
1501         writeq(val64, &bar0->rx_w_round_robin_2);
1502         writeq(val64, &bar0->rx_w_round_robin_3);
1503         writeq(val64, &bar0->rx_w_round_robin_4);
1504 
1505         val64 = 0x8080808080808080ULL;
1506         writeq(val64, &bar0->rts_qos_steering);
1507         break;
1508     case 2:
1509         val64 = 0x0001000100010001ULL;
1510         writeq(val64, &bar0->rx_w_round_robin_0);
1511         writeq(val64, &bar0->rx_w_round_robin_1);
1512         writeq(val64, &bar0->rx_w_round_robin_2);
1513         writeq(val64, &bar0->rx_w_round_robin_3);
1514         val64 = 0x0001000100000000ULL;
1515         writeq(val64, &bar0->rx_w_round_robin_4);
1516 
1517         val64 = 0x8080808040404040ULL;
1518         writeq(val64, &bar0->rts_qos_steering);
1519         break;
1520     case 3:
1521         val64 = 0x0001020001020001ULL;
1522         writeq(val64, &bar0->rx_w_round_robin_0);
1523         val64 = 0x0200010200010200ULL;
1524         writeq(val64, &bar0->rx_w_round_robin_1);
1525         val64 = 0x0102000102000102ULL;
1526         writeq(val64, &bar0->rx_w_round_robin_2);
1527         val64 = 0x0001020001020001ULL;
1528         writeq(val64, &bar0->rx_w_round_robin_3);
1529         val64 = 0x0200010200000000ULL;
1530         writeq(val64, &bar0->rx_w_round_robin_4);
1531 
1532         val64 = 0x8080804040402020ULL;
1533         writeq(val64, &bar0->rts_qos_steering);
1534         break;
1535     case 4:
1536         val64 = 0x0001020300010203ULL;
1537         writeq(val64, &bar0->rx_w_round_robin_0);
1538         writeq(val64, &bar0->rx_w_round_robin_1);
1539         writeq(val64, &bar0->rx_w_round_robin_2);
1540         writeq(val64, &bar0->rx_w_round_robin_3);
1541         val64 = 0x0001020300000000ULL;
1542         writeq(val64, &bar0->rx_w_round_robin_4);
1543 
1544         val64 = 0x8080404020201010ULL;
1545         writeq(val64, &bar0->rts_qos_steering);
1546         break;
1547     case 5:
1548         val64 = 0x0001020304000102ULL;
1549         writeq(val64, &bar0->rx_w_round_robin_0);
1550         val64 = 0x0304000102030400ULL;
1551         writeq(val64, &bar0->rx_w_round_robin_1);
1552         val64 = 0x0102030400010203ULL;
1553         writeq(val64, &bar0->rx_w_round_robin_2);
1554         val64 = 0x0400010203040001ULL;
1555         writeq(val64, &bar0->rx_w_round_robin_3);
1556         val64 = 0x0203040000000000ULL;
1557         writeq(val64, &bar0->rx_w_round_robin_4);
1558 
1559         val64 = 0x8080404020201008ULL;
1560         writeq(val64, &bar0->rts_qos_steering);
1561         break;
1562     case 6:
1563         val64 = 0x0001020304050001ULL;
1564         writeq(val64, &bar0->rx_w_round_robin_0);
1565         val64 = 0x0203040500010203ULL;
1566         writeq(val64, &bar0->rx_w_round_robin_1);
1567         val64 = 0x0405000102030405ULL;
1568         writeq(val64, &bar0->rx_w_round_robin_2);
1569         val64 = 0x0001020304050001ULL;
1570         writeq(val64, &bar0->rx_w_round_robin_3);
1571         val64 = 0x0203040500000000ULL;
1572         writeq(val64, &bar0->rx_w_round_robin_4);
1573 
1574         val64 = 0x8080404020100804ULL;
1575         writeq(val64, &bar0->rts_qos_steering);
1576         break;
1577     case 7:
1578         val64 = 0x0001020304050600ULL;
1579         writeq(val64, &bar0->rx_w_round_robin_0);
1580         val64 = 0x0102030405060001ULL;
1581         writeq(val64, &bar0->rx_w_round_robin_1);
1582         val64 = 0x0203040506000102ULL;
1583         writeq(val64, &bar0->rx_w_round_robin_2);
1584         val64 = 0x0304050600010203ULL;
1585         writeq(val64, &bar0->rx_w_round_robin_3);
1586         val64 = 0x0405060000000000ULL;
1587         writeq(val64, &bar0->rx_w_round_robin_4);
1588 
1589         val64 = 0x8080402010080402ULL;
1590         writeq(val64, &bar0->rts_qos_steering);
1591         break;
1592     case 8:
1593         val64 = 0x0001020304050607ULL;
1594         writeq(val64, &bar0->rx_w_round_robin_0);
1595         writeq(val64, &bar0->rx_w_round_robin_1);
1596         writeq(val64, &bar0->rx_w_round_robin_2);
1597         writeq(val64, &bar0->rx_w_round_robin_3);
1598         val64 = 0x0001020300000000ULL;
1599         writeq(val64, &bar0->rx_w_round_robin_4);
1600 
1601         val64 = 0x8040201008040201ULL;
1602         writeq(val64, &bar0->rts_qos_steering);
1603         break;
1604     }
1605 
1606     /* UDP Fix */
1607     val64 = 0;
1608     for (i = 0; i < 8; i++)
1609         writeq(val64, &bar0->rts_frm_len_n[i]);
1610 
1611     /* Set the default rts frame length for the rings configured */
1612     val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1613     for (i = 0 ; i < config->rx_ring_num ; i++)
1614         writeq(val64, &bar0->rts_frm_len_n[i]);
1615 
1616     /* Set the frame length for the configured rings
1617      * desired by the user
1618      */
1619     for (i = 0; i < config->rx_ring_num; i++) {
1620         /* If rts_frm_len[i] == 0 then it is assumed that user not
1621          * specified frame length steering.
1622          * If the user provides the frame length then program
1623          * the rts_frm_len register for those values or else
1624          * leave it as it is.
1625          */
1626         if (rts_frm_len[i] != 0) {
1627             writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1628                    &bar0->rts_frm_len_n[i]);
1629         }
1630     }
1631 
1632     /* Disable differentiated services steering logic */
1633     for (i = 0; i < 64; i++) {
1634         if (rts_ds_steer(nic, i, 0) == FAILURE) {
1635             DBG_PRINT(ERR_DBG,
1636                   "%s: rts_ds_steer failed on codepoint %d\n",
1637                   dev->name, i);
1638             return -ENODEV;
1639         }
1640     }
1641 
1642     /* Program statistics memory */
1643     writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1644 
1645     if (nic->device_type == XFRAME_II_DEVICE) {
1646         val64 = STAT_BC(0x320);
1647         writeq(val64, &bar0->stat_byte_cnt);
1648     }
1649 
1650     /*
1651      * Initializing the sampling rate for the device to calculate the
1652      * bandwidth utilization.
1653      */
1654     val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1655         MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1656     writeq(val64, &bar0->mac_link_util);
1657 
1658     /*
1659      * Initializing the Transmit and Receive Traffic Interrupt
1660      * Scheme.
1661      */
1662 
1663     /* Initialize TTI */
1664     if (SUCCESS != init_tti(nic, nic->last_link_state, true))
1665         return -ENODEV;
1666 
1667     /* RTI Initialization */
1668     if (nic->device_type == XFRAME_II_DEVICE) {
1669         /*
1670          * Programmed to generate Apprx 500 Intrs per
1671          * second
1672          */
1673         int count = (nic->config.bus_speed * 125)/4;
1674         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1675     } else
1676         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1677     val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1678         RTI_DATA1_MEM_RX_URNG_B(0x10) |
1679         RTI_DATA1_MEM_RX_URNG_C(0x30) |
1680         RTI_DATA1_MEM_RX_TIMER_AC_EN;
1681 
1682     writeq(val64, &bar0->rti_data1_mem);
1683 
1684     val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1685         RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1686     if (nic->config.intr_type == MSI_X)
1687         val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1688               RTI_DATA2_MEM_RX_UFC_D(0x40));
1689     else
1690         val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1691               RTI_DATA2_MEM_RX_UFC_D(0x80));
1692     writeq(val64, &bar0->rti_data2_mem);
1693 
1694     for (i = 0; i < config->rx_ring_num; i++) {
1695         val64 = RTI_CMD_MEM_WE |
1696             RTI_CMD_MEM_STROBE_NEW_CMD |
1697             RTI_CMD_MEM_OFFSET(i);
1698         writeq(val64, &bar0->rti_command_mem);
1699 
1700         /*
1701          * Once the operation completes, the Strobe bit of the
1702          * command register will be reset. We poll for this
1703          * particular condition. We wait for a maximum of 500ms
1704          * for the operation to complete, if it's not complete
1705          * by then we return error.
1706          */
1707         time = 0;
1708         while (true) {
1709             val64 = readq(&bar0->rti_command_mem);
1710             if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1711                 break;
1712 
1713             if (time > 10) {
1714                 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1715                       dev->name);
1716                 return -ENODEV;
1717             }
1718             time++;
1719             msleep(50);
1720         }
1721     }
1722 
1723     /*
1724      * Initializing proper values as Pause threshold into all
1725      * the 8 Queues on Rx side.
1726      */
1727     writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1728     writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1729 
1730     /* Disable RMAC PAD STRIPPING */
1731     add = &bar0->mac_cfg;
1732     val64 = readq(&bar0->mac_cfg);
1733     val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1734     writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735     writel((u32) (val64), add);
1736     writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1737     writel((u32) (val64 >> 32), (add + 4));
1738     val64 = readq(&bar0->mac_cfg);
1739 
1740     /* Enable FCS stripping by adapter */
1741     add = &bar0->mac_cfg;
1742     val64 = readq(&bar0->mac_cfg);
1743     val64 |= MAC_CFG_RMAC_STRIP_FCS;
1744     if (nic->device_type == XFRAME_II_DEVICE)
1745         writeq(val64, &bar0->mac_cfg);
1746     else {
1747         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748         writel((u32) (val64), add);
1749         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1750         writel((u32) (val64 >> 32), (add + 4));
1751     }
1752 
1753     /*
1754      * Set the time value to be inserted in the pause frame
1755      * generated by xena.
1756      */
1757     val64 = readq(&bar0->rmac_pause_cfg);
1758     val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1759     val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1760     writeq(val64, &bar0->rmac_pause_cfg);
1761 
1762     /*
1763      * Set the Threshold Limit for Generating the pause frame
1764      * If the amount of data in any Queue exceeds ratio of
1765      * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1766      * pause frame is generated
1767      */
1768     val64 = 0;
1769     for (i = 0; i < 4; i++) {
1770         val64 |= (((u64)0xFF00 |
1771                nic->mac_control.mc_pause_threshold_q0q3)
1772               << (i * 2 * 8));
1773     }
1774     writeq(val64, &bar0->mc_pause_thresh_q0q3);
1775 
1776     val64 = 0;
1777     for (i = 0; i < 4; i++) {
1778         val64 |= (((u64)0xFF00 |
1779                nic->mac_control.mc_pause_threshold_q4q7)
1780               << (i * 2 * 8));
1781     }
1782     writeq(val64, &bar0->mc_pause_thresh_q4q7);
1783 
1784     /*
1785      * TxDMA will stop Read request if the number of read split has
1786      * exceeded the limit pointed by shared_splits
1787      */
1788     val64 = readq(&bar0->pic_control);
1789     val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1790     writeq(val64, &bar0->pic_control);
1791 
1792     if (nic->config.bus_speed == 266) {
1793         writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1794         writeq(0x0, &bar0->read_retry_delay);
1795         writeq(0x0, &bar0->write_retry_delay);
1796     }
1797 
1798     /*
1799      * Programming the Herc to split every write transaction
1800      * that does not start on an ADB to reduce disconnects.
1801      */
1802     if (nic->device_type == XFRAME_II_DEVICE) {
1803         val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1804             MISC_LINK_STABILITY_PRD(3);
1805         writeq(val64, &bar0->misc_control);
1806         val64 = readq(&bar0->pic_control2);
1807         val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1808         writeq(val64, &bar0->pic_control2);
1809     }
1810     if (strstr(nic->product_name, "CX4")) {
1811         val64 = TMAC_AVG_IPG(0x17);
1812         writeq(val64, &bar0->tmac_avg_ipg);
1813     }
1814 
1815     return SUCCESS;
1816 }
1817 #define LINK_UP_DOWN_INTERRUPT      1
1818 #define MAC_RMAC_ERR_TIMER      2
1819 
1820 static int s2io_link_fault_indication(struct s2io_nic *nic)
1821 {
1822     if (nic->device_type == XFRAME_II_DEVICE)
1823         return LINK_UP_DOWN_INTERRUPT;
1824     else
1825         return MAC_RMAC_ERR_TIMER;
1826 }
1827 
1828 /**
1829  *  do_s2io_write_bits -  update alarm bits in alarm register
1830  *  @value: alarm bits
1831  *  @flag: interrupt status
1832  *  @addr: address value
1833  *  Description: update alarm bits in alarm register
1834  *  Return Value:
1835  *  NONE.
1836  */
1837 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1838 {
1839     u64 temp64;
1840 
1841     temp64 = readq(addr);
1842 
1843     if (flag == ENABLE_INTRS)
1844         temp64 &= ~((u64)value);
1845     else
1846         temp64 |= ((u64)value);
1847     writeq(temp64, addr);
1848 }
1849 
/**
 *  en_dis_err_alarms - enable/disable the per-block error alarm sources
 *  @nic: device private variable.
 *  @mask: bitmask of interrupt blocks (TX_DMA_INTR, TX_MAC_INTR, ...)
 *     whose alarm mask registers should be updated.
 *  @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits().
 *  Description: for every block selected in @mask, updates that block's
 *  alarm mask registers and accumulates the block's bit for the general
 *  interrupt mask into nic->general_int_mask.
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    register u64 gen_int_mask = 0;
    u64 interruptible;

    /* Mask everything at the top level while the alarm masks change. */
    writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
    /* Tx DMA block: per-subunit (TDA/PFC/PCC/TTI/LSO/TPA/SM) alarms. */
    if (mask & TX_DMA_INTR) {
        gen_int_mask |= TXDMA_INT_M;

        do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
                   TXDMA_PCC_INT | TXDMA_TTI_INT |
                   TXDMA_LSO_INT | TXDMA_TPA_INT |
                   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

        do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
                   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
                   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
                   &bar0->pfc_err_mask);

        do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
                   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
                   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

        do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
                   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
                   PCC_N_SERR | PCC_6_COF_OV_ERR |
                   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
                   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
                   PCC_TXB_ECC_SG_ERR,
                   flag, &bar0->pcc_err_mask);

        do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
                   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

        do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
                   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
                   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
                   flag, &bar0->lso_err_mask);

        do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
                   flag, &bar0->tpa_err_mask);

        do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
    }

    /* Tx MAC block alarms. */
    if (mask & TX_MAC_INTR) {
        gen_int_mask |= TXMAC_INT_M;
        do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
                   &bar0->mac_int_mask);
        do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
                   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
                   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
                   flag, &bar0->mac_tmac_err_mask);
    }

    /* Tx XGXS (serdes) block alarms. */
    if (mask & TX_XGXS_INTR) {
        gen_int_mask |= TXXGXS_INT_M;
        do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
                   &bar0->xgxs_int_mask);
        do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
                   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
                   flag, &bar0->xgxs_txgxs_err_mask);
    }

    /* Rx DMA block: per-subunit (RC/PRC/RPA/RDA/RTI) alarms. */
    if (mask & RX_DMA_INTR) {
        gen_int_mask |= RXDMA_INT_M;
        do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
                   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
                   flag, &bar0->rxdma_int_mask);
        do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
                   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
                   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
                   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
        do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
                   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
                   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
                   &bar0->prc_pcix_err_mask);
        do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
                   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
                   &bar0->rpa_err_mask);
        do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
                   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
                   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
                   RDA_FRM_ECC_SG_ERR |
                   RDA_MISC_ERR|RDA_PCIX_ERR,
                   flag, &bar0->rda_err_mask);
        do_s2io_write_bits(RTI_SM_ERR_ALARM |
                   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
                   flag, &bar0->rti_err_mask);
    }

    /* Rx MAC block alarms; link-state change only matters for the
     * RMAC-error-timer link-fault scheme (non-Herc adapters). */
    if (mask & RX_MAC_INTR) {
        gen_int_mask |= RXMAC_INT_M;
        do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
                   &bar0->mac_int_mask);
        interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
                 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
                 RMAC_DOUBLE_ECC_ERR);
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
            interruptible |= RMAC_LINK_STATE_CHANGE_INT;
        do_s2io_write_bits(interruptible,
                   flag, &bar0->mac_rmac_err_mask);
    }

    /* Rx XGXS (serdes) block alarms. */
    if (mask & RX_XGXS_INTR) {
        gen_int_mask |= RXXGXS_INT_M;
        do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
                   &bar0->xgxs_int_mask);
        do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
                   &bar0->xgxs_rxgxs_err_mask);
    }

    /* Memory controller (RLDRAM) alarms. */
    if (mask & MC_INTR) {
        gen_int_mask |= MC_INT_M;
        do_s2io_write_bits(MC_INT_MASK_MC_INT,
                   flag, &bar0->mc_int_mask);
        do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
                   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
                   &bar0->mc_err_mask);
    }
    /* Cache the accumulated bits for en_dis_able_nic_intrs(). */
    nic->general_int_mask = gen_int_mask;

    /* Remove this line when alarm interrupts are enabled */
    nic->general_int_mask = 0;
}
1976 
1977 /**
1978  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1979  *  @nic: device private variable,
1980  *  @mask: A mask indicating which Intr block must be modified and,
1981  *  @flag: A flag indicating whether to enable or disable the Intrs.
1982  *  Description: This function will either disable or enable the interrupts
1983  *  depending on the flag argument. The mask argument can be used to
1984  *  enable/disable any Intr block.
1985  *  Return Value: NONE.
1986  */
1987 
1988 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1989 {
1990     struct XENA_dev_config __iomem *bar0 = nic->bar0;
1991     register u64 temp64 = 0, intr_mask = 0;
1992 
1993     intr_mask = nic->general_int_mask;
1994 
1995     /*  Top level interrupt classification */
1996     /*  PIC Interrupts */
1997     if (mask & TX_PIC_INTR) {
1998         /*  Enable PIC Intrs in the general intr mask register */
1999         intr_mask |= TXPIC_INT_M;
2000         if (flag == ENABLE_INTRS) {
2001             /*
2002              * If Hercules adapter enable GPIO otherwise
2003              * disable all PCIX, Flash, MDIO, IIC and GPIO
2004              * interrupts for now.
2005              * TODO
2006              */
2007             if (s2io_link_fault_indication(nic) ==
2008                 LINK_UP_DOWN_INTERRUPT) {
2009                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2010                            &bar0->pic_int_mask);
2011                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2012                            &bar0->gpio_int_mask);
2013             } else
2014                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2015         } else if (flag == DISABLE_INTRS) {
2016             /*
2017              * Disable PIC Intrs in the general
2018              * intr mask register
2019              */
2020             writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2021         }
2022     }
2023 
2024     /*  Tx traffic interrupts */
2025     if (mask & TX_TRAFFIC_INTR) {
2026         intr_mask |= TXTRAFFIC_INT_M;
2027         if (flag == ENABLE_INTRS) {
2028             /*
2029              * Enable all the Tx side interrupts
2030              * writing 0 Enables all 64 TX interrupt levels
2031              */
2032             writeq(0x0, &bar0->tx_traffic_mask);
2033         } else if (flag == DISABLE_INTRS) {
2034             /*
2035              * Disable Tx Traffic Intrs in the general intr mask
2036              * register.
2037              */
2038             writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2039         }
2040     }
2041 
2042     /*  Rx traffic interrupts */
2043     if (mask & RX_TRAFFIC_INTR) {
2044         intr_mask |= RXTRAFFIC_INT_M;
2045         if (flag == ENABLE_INTRS) {
2046             /* writing 0 Enables all 8 RX interrupt levels */
2047             writeq(0x0, &bar0->rx_traffic_mask);
2048         } else if (flag == DISABLE_INTRS) {
2049             /*
2050              * Disable Rx Traffic Intrs in the general intr mask
2051              * register.
2052              */
2053             writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2054         }
2055     }
2056 
2057     temp64 = readq(&bar0->general_int_mask);
2058     if (flag == ENABLE_INTRS)
2059         temp64 &= ~((u64)intr_mask);
2060     else
2061         temp64 = DISABLE_ALL_INTRS;
2062     writeq(temp64, &bar0->general_int_mask);
2063 
2064     nic->general_int_mask = readq(&bar0->general_int_mask);
2065 }
2066 
2067 /**
2068  *  verify_pcc_quiescent- Checks for PCC quiescent state
2069  *  @sp : private member of the device structure, which is a pointer to the
2070  *  s2io_nic structure.
2071  *  @flag: boolean controlling function path
2072  *  Return: 1 If PCC is quiescence
2073  *          0 If PCC is not quiescence
2074  */
2075 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2076 {
2077     int ret = 0, herc;
2078     struct XENA_dev_config __iomem *bar0 = sp->bar0;
2079     u64 val64 = readq(&bar0->adapter_status);
2080 
2081     herc = (sp->device_type == XFRAME_II_DEVICE);
2082 
2083     if (flag == false) {
2084         if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2085             if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2086                 ret = 1;
2087         } else {
2088             if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2089                 ret = 1;
2090         }
2091     } else {
2092         if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2093             if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2094                  ADAPTER_STATUS_RMAC_PCC_IDLE))
2095                 ret = 1;
2096         } else {
2097             if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2098                  ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2099                 ret = 1;
2100         }
2101     }
2102 
2103     return ret;
2104 }
2105 /**
2106  *  verify_xena_quiescence - Checks whether the H/W is ready
2107  *  @sp : private member of the device structure, which is a pointer to the
2108  *  s2io_nic structure.
2109  *  Description: Returns whether the H/W is ready to go or not. Depending
2110  *  on whether adapter enable bit was written or not the comparison
2111  *  differs and the calling function passes the input argument flag to
2112  *  indicate this.
2113  *  Return: 1 If xena is quiescence
2114  *          0 If Xena is not quiescence
2115  */
2116 
static int verify_xena_quiescence(struct s2io_nic *sp)
{
    int  mode;
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    u64 val64 = readq(&bar0->adapter_status);
    mode = s2io_verify_pci_mode(sp);

    /*
     * Each engine must report ready/quiescent in adapter_status;
     * fail on the first one that does not, with a diagnostic.
     */
    if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
        DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
        DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
        DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
        DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
        DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
        DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
        DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
        DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
        return 0;
    }

    /*
     * In PCI 33 mode, the P_PLL is not used, and therefore,
     * the P_PLL_LOCK bit in the adapter_status register will
     * not be asserted.
     */
    if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
        sp->device_type == XFRAME_II_DEVICE &&
        mode != PCI_MODE_PCI_33) {
        DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
        return 0;
    }
    /* RC_PRC_QUIESCENT is a multi-bit field: all bits must be set. */
    if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
          ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
        DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
        return 0;
    }
    return 1;
}
2175 
2176 /**
2177  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2178  * @sp: Pointer to device specifc structure
2179  * Description :
2180  * New procedure to clear mac address reading  problems on Alpha platforms
2181  *
2182  */
2183 
2184 static void fix_mac_address(struct s2io_nic *sp)
2185 {
2186     struct XENA_dev_config __iomem *bar0 = sp->bar0;
2187     int i = 0;
2188 
2189     while (fix_mac[i] != END_SIGN) {
2190         writeq(fix_mac[i++], &bar0->gpio_control);
2191         udelay(10);
2192         (void) readq(&bar0->gpio_control);
2193     }
2194 }
2195 
2196 /**
2197  *  start_nic - Turns the device on
2198  *  @nic : device private variable.
2199  *  Description:
2200  *  This function actually turns the device on. Before this  function is
2201  *  called,all Registers are configured from their reset states
2202  *  and shared memory is allocated but the NIC is still quiescent. On
2203  *  calling this function, the device interrupts are cleared and the NIC is
2204  *  literally switched on by writing into the adapter control register.
2205  *  Return Value:
2206  *  SUCCESS on success and -1 on failure.
2207  */
2208 
2209 static int start_nic(struct s2io_nic *nic)
2210 {
2211     struct XENA_dev_config __iomem *bar0 = nic->bar0;
2212     struct net_device *dev = nic->dev;
2213     register u64 val64 = 0;
2214     u16 subid, i;
2215     struct config_param *config = &nic->config;
2216     struct mac_info *mac_control = &nic->mac_control;
2217 
2218     /*  PRC Initialization and configuration */
2219     for (i = 0; i < config->rx_ring_num; i++) {
2220         struct ring_info *ring = &mac_control->rings[i];
2221 
2222         writeq((u64)ring->rx_blocks[0].block_dma_addr,
2223                &bar0->prc_rxd0_n[i]);
2224 
2225         val64 = readq(&bar0->prc_ctrl_n[i]);
2226         if (nic->rxd_mode == RXD_MODE_1)
2227             val64 |= PRC_CTRL_RC_ENABLED;
2228         else
2229             val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2230         if (nic->device_type == XFRAME_II_DEVICE)
2231             val64 |= PRC_CTRL_GROUP_READS;
2232         val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2233         val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2234         writeq(val64, &bar0->prc_ctrl_n[i]);
2235     }
2236 
2237     if (nic->rxd_mode == RXD_MODE_3B) {
2238         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2239         val64 = readq(&bar0->rx_pa_cfg);
2240         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2241         writeq(val64, &bar0->rx_pa_cfg);
2242     }
2243 
2244     if (vlan_tag_strip == 0) {
2245         val64 = readq(&bar0->rx_pa_cfg);
2246         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2247         writeq(val64, &bar0->rx_pa_cfg);
2248         nic->vlan_strip_flag = 0;
2249     }
2250 
2251     /*
2252      * Enabling MC-RLDRAM. After enabling the device, we timeout
2253      * for around 100ms, which is approximately the time required
2254      * for the device to be ready for operation.
2255      */
2256     val64 = readq(&bar0->mc_rldram_mrs);
2257     val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2258     SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2259     val64 = readq(&bar0->mc_rldram_mrs);
2260 
2261     msleep(100);    /* Delay by around 100 ms. */
2262 
2263     /* Enabling ECC Protection. */
2264     val64 = readq(&bar0->adapter_control);
2265     val64 &= ~ADAPTER_ECC_EN;
2266     writeq(val64, &bar0->adapter_control);
2267 
2268     /*
2269      * Verify if the device is ready to be enabled, if so enable
2270      * it.
2271      */
2272     val64 = readq(&bar0->adapter_status);
2273     if (!verify_xena_quiescence(nic)) {
2274         DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2275               "Adapter status reads: 0x%llx\n",
2276               dev->name, (unsigned long long)val64);
2277         return FAILURE;
2278     }
2279 
2280     /*
2281      * With some switches, link might be already up at this point.
2282      * Because of this weird behavior, when we enable laser,
2283      * we may not get link. We need to handle this. We cannot
2284      * figure out which switch is misbehaving. So we are forced to
2285      * make a global change.
2286      */
2287 
2288     /* Enabling Laser. */
2289     val64 = readq(&bar0->adapter_control);
2290     val64 |= ADAPTER_EOI_TX_ON;
2291     writeq(val64, &bar0->adapter_control);
2292 
2293     if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2294         /*
2295          * Dont see link state interrupts initially on some switches,
2296          * so directly scheduling the link state task here.
2297          */
2298         schedule_work(&nic->set_link_task);
2299     }
2300     /* SXE-002: Initialize link and activity LED */
2301     subid = nic->pdev->subsystem_device;
2302     if (((subid & 0xFF) >= 0x07) &&
2303         (nic->device_type == XFRAME_I_DEVICE)) {
2304         val64 = readq(&bar0->gpio_control);
2305         val64 |= 0x0000800000000000ULL;
2306         writeq(val64, &bar0->gpio_control);
2307         val64 = 0x0411040400000000ULL;
2308         writeq(val64, (void __iomem *)bar0 + 0x2700);
2309     }
2310 
2311     return SUCCESS;
2312 }
2313 /**
2314  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2315  * @fifo_data: fifo data pointer
2316  * @txdlp: descriptor
2317  * @get_off: unused
2318  */
2319 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2320                     struct TxD *txdlp, int get_off)
2321 {
2322     struct s2io_nic *nic = fifo_data->nic;
2323     struct sk_buff *skb;
2324     struct TxD *txds;
2325     u16 j, frg_cnt;
2326 
2327     txds = txdlp;
2328     if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2329         dma_unmap_single(&nic->pdev->dev,
2330                  (dma_addr_t)txds->Buffer_Pointer,
2331                  sizeof(u64), DMA_TO_DEVICE);
2332         txds++;
2333     }
2334 
2335     skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2336     if (!skb) {
2337         memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2338         return NULL;
2339     }
2340     dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2341              skb_headlen(skb), DMA_TO_DEVICE);
2342     frg_cnt = skb_shinfo(skb)->nr_frags;
2343     if (frg_cnt) {
2344         txds++;
2345         for (j = 0; j < frg_cnt; j++, txds++) {
2346             const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2347             if (!txds->Buffer_Pointer)
2348                 break;
2349             dma_unmap_page(&nic->pdev->dev,
2350                        (dma_addr_t)txds->Buffer_Pointer,
2351                        skb_frag_size(frag), DMA_TO_DEVICE);
2352         }
2353     }
2354     memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2355     return skb;
2356 }
2357 
2358 /**
2359  *  free_tx_buffers - Free all queued Tx buffers
2360  *  @nic : device private variable.
2361  *  Description:
2362  *  Free all queued Tx buffers.
2363  *  Return Value: void
2364  */
2365 
2366 static void free_tx_buffers(struct s2io_nic *nic)
2367 {
2368     struct net_device *dev = nic->dev;
2369     struct sk_buff *skb;
2370     struct TxD *txdp;
2371     int i, j;
2372     int cnt = 0;
2373     struct config_param *config = &nic->config;
2374     struct mac_info *mac_control = &nic->mac_control;
2375     struct stat_block *stats = mac_control->stats_info;
2376     struct swStat *swstats = &stats->sw_stat;
2377 
2378     for (i = 0; i < config->tx_fifo_num; i++) {
2379         struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2380         struct fifo_info *fifo = &mac_control->fifos[i];
2381         unsigned long flags;
2382 
2383         spin_lock_irqsave(&fifo->tx_lock, flags);
2384         for (j = 0; j < tx_cfg->fifo_len; j++) {
2385             txdp = fifo->list_info[j].list_virt_addr;
2386             skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2387             if (skb) {
2388                 swstats->mem_freed += skb->truesize;
2389                 dev_kfree_skb(skb);
2390                 cnt++;
2391             }
2392         }
2393         DBG_PRINT(INTR_DBG,
2394               "%s: forcibly freeing %d skbs on FIFO%d\n",
2395               dev->name, cnt, i);
2396         fifo->tx_curr_get_info.offset = 0;
2397         fifo->tx_curr_put_info.offset = 0;
2398         spin_unlock_irqrestore(&fifo->tx_lock, flags);
2399     }
2400 }
2401 
2402 /**
2403  *   stop_nic -  To stop the nic
2404  *   @nic : device private variable.
2405  *   Description:
2406  *   This function does exactly the opposite of what the start_nic()
2407  *   function does. This function is called to stop the device.
2408  *   Return Value:
2409  *   void.
2410  */
2411 
2412 static void stop_nic(struct s2io_nic *nic)
2413 {
2414     struct XENA_dev_config __iomem *bar0 = nic->bar0;
2415     register u64 val64 = 0;
2416     u16 interruptible;
2417 
2418     /*  Disable all interrupts */
2419     en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2420     interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2421     interruptible |= TX_PIC_INTR;
2422     en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2423 
2424     /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2425     val64 = readq(&bar0->adapter_control);
2426     val64 &= ~(ADAPTER_CNTL_EN);
2427     writeq(val64, &bar0->adapter_control);
2428 }
2429 
2430 /**
2431  *  fill_rx_buffers - Allocates the Rx side skbs
2432  *  @nic : device private variable.
2433  *  @ring: per ring structure
2434  *  @from_card_up: If this is true, we will map the buffer to get
2435  *     the dma address for buf0 and buf1 to give it to the card.
2436  *     Else we will sync the already mapped buffer to give it to the card.
2437  *  Description:
2438  *  The function allocates Rx side skbs and puts the physical
2439  *  address of these buffers into the RxD buffer pointers, so that the NIC
2440  *  can DMA the received frame into these locations.
2441  *  The NIC supports 3 receive modes, viz
2442  *  1. single buffer,
2443  *  2. three buffer and
2444  *  3. Five buffer modes.
2445  *  Each mode defines how many fragments the received frame will be split
2446  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2447  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2448  *  is split into 3 fragments. As of now only single buffer mode is
2449  *  supported.
2450  *   Return Value:
2451  *  SUCCESS on success or an appropriate -ve value on failure.
2452  */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
               int from_card_up)
{
    struct sk_buff *skb;
    struct RxD_t *rxdp;
    int off, size, block_no, block_no1;
    u32 alloc_tab = 0;
    u32 alloc_cnt;
    u64 tmp;
    struct buffAdd *ba;
    struct RxD_t *first_rxdp = NULL;
    u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
    struct RxD1 *rxdp1;
    struct RxD3 *rxdp3;
    struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

    /* Number of descriptors that currently lack a buffer. */
    alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

    block_no1 = ring->rx_curr_get_info.block_index;
    while (alloc_tab < alloc_cnt) {
        block_no = ring->rx_curr_put_info.block_index;

        off = ring->rx_curr_put_info.offset;

        rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

        /* Put cursor has caught up with the get cursor while the
         * descriptor still carries an skb: the ring is full.
         */
        if ((block_no == block_no1) &&
            (off == ring->rx_curr_get_info.offset) &&
            (rxdp->Host_Control)) {
            DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
                  ring->dev->name);
            goto end;
        }
        /* End of the current block: advance to the next block,
         * wrapping at block_count.
         */
        if (off && (off == ring->rxd_count)) {
            ring->rx_curr_put_info.block_index++;
            if (ring->rx_curr_put_info.block_index ==
                ring->block_count)
                ring->rx_curr_put_info.block_index = 0;
            block_no = ring->rx_curr_put_info.block_index;
            off = 0;
            ring->rx_curr_put_info.offset = off;
            rxdp = ring->rx_blocks[block_no].block_virt_addr;
            DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
                  ring->dev->name, rxdp);

        }

        /* Descriptor is still owned by the adapter - stop here. */
        if ((rxdp->Control_1 & RXD_OWN_XENA) &&
            ((ring->rxd_mode == RXD_MODE_3B) &&
             (rxdp->Control_2 & s2BIT(0)))) {
            ring->rx_curr_put_info.offset = off;
            goto end;
        }
        /* calculate size of skb based on ring mode */
        size = ring->mtu +
            HEADER_ETHERNET_II_802_3_SIZE +
            HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
        if (ring->rxd_mode == RXD_MODE_1)
            size += NET_IP_ALIGN;
        else
            size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

        /* allocate skb */
        skb = netdev_alloc_skb(nic->dev, size);
        if (!skb) {
            DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
                  ring->dev->name);
            /* Hand the batch built so far to the NIC before
             * bailing out; dma_wmb() orders the descriptor
             * writes before the ownership transfer.
             */
            if (first_rxdp) {
                dma_wmb();
                first_rxdp->Control_1 |= RXD_OWN_XENA;
            }
            swstats->mem_alloc_fail_cnt++;

            return -ENOMEM ;
        }
        swstats->mem_allocated += skb->truesize;

        if (ring->rxd_mode == RXD_MODE_1) {
            /* 1 buffer mode - normal operation mode */
            rxdp1 = (struct RxD1 *)rxdp;
            memset(rxdp, 0, sizeof(struct RxD1));
            skb_reserve(skb, NET_IP_ALIGN);
            rxdp1->Buffer0_ptr =
                dma_map_single(&ring->pdev->dev, skb->data,
                           size - NET_IP_ALIGN,
                           DMA_FROM_DEVICE);
            if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
                goto pci_map_failed;

            rxdp->Control_2 =
                SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
            /* Stash the skb pointer so the Rx path can retrieve it. */
            rxdp->Host_Control = (unsigned long)skb;
        } else if (ring->rxd_mode == RXD_MODE_3B) {
            /*
             * 2 buffer mode -
             * 2 buffer mode provides 128
             * byte aligned receive buffers.
             */

            rxdp3 = (struct RxD3 *)rxdp;
            /* save buffer pointers to avoid frequent dma mapping */
            Buffer0_ptr = rxdp3->Buffer0_ptr;
            Buffer1_ptr = rxdp3->Buffer1_ptr;
            memset(rxdp, 0, sizeof(struct RxD3));
            /* restore the buffer pointers for dma sync*/
            rxdp3->Buffer0_ptr = Buffer0_ptr;
            rxdp3->Buffer1_ptr = Buffer1_ptr;

            ba = &ring->ba[block_no][off];
            skb_reserve(skb, BUF0_LEN);
            /* Round skb->data up to the next alignment boundary
             * (assumes ALIGN_SIZE is 2^k - 1 - TODO confirm).
             */
            tmp = (u64)(unsigned long)skb->data;
            tmp += ALIGN_SIZE;
            tmp &= ~ALIGN_SIZE;
            skb->data = (void *) (unsigned long)tmp;
            skb_reset_tail_pointer(skb);

            /* Buffer0 is mapped once at card-up; on later refills
             * it is only synced back to the device.
             */
            if (from_card_up) {
                rxdp3->Buffer0_ptr =
                    dma_map_single(&ring->pdev->dev,
                               ba->ba_0, BUF0_LEN,
                               DMA_FROM_DEVICE);
                if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
                    goto pci_map_failed;
            } else
                dma_sync_single_for_device(&ring->pdev->dev,
                               (dma_addr_t)rxdp3->Buffer0_ptr,
                               BUF0_LEN,
                               DMA_FROM_DEVICE);

            rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
            if (ring->rxd_mode == RXD_MODE_3B) {
                /* Two buffer mode */

                /*
                 * Buffer2 will have L3/L4 header plus
                 * L4 payload
                 */
                rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
                                    skb->data,
                                    ring->mtu + 4,
                                    DMA_FROM_DEVICE);

                if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
                    goto pci_map_failed;

                if (from_card_up) {
                    rxdp3->Buffer1_ptr =
                        dma_map_single(&ring->pdev->dev,
                                   ba->ba_1,
                                   BUF1_LEN,
                                   DMA_FROM_DEVICE);

                    if (dma_mapping_error(&nic->pdev->dev,
                                  rxdp3->Buffer1_ptr)) {
                        /* Undo the Buffer2 mapping done
                         * just above before failing.
                         */
                        dma_unmap_single(&ring->pdev->dev,
                                 (dma_addr_t)(unsigned long)
                                 skb->data,
                                 ring->mtu + 4,
                                 DMA_FROM_DEVICE);
                        goto pci_map_failed;
                    }
                }
                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                rxdp->Control_2 |= SET_BUFFER2_SIZE_3
                    (ring->mtu + 4);
            }
            rxdp->Control_2 |= s2BIT(0);
            rxdp->Host_Control = (unsigned long) (skb);
        }
        /* Every descriptor except each batch's first is handed to
         * the NIC immediately; the batch's first is deferred until
         * the whole batch is built (see below and "end:").
         */
        if (alloc_tab & ((1 << rxsync_frequency) - 1))
            rxdp->Control_1 |= RXD_OWN_XENA;
        off++;
        if (off == (ring->rxd_count + 1))
            off = 0;
        ring->rx_curr_put_info.offset = off;

        rxdp->Control_2 |= SET_RXD_MARKER;
        if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
            if (first_rxdp) {
                dma_wmb();
                first_rxdp->Control_1 |= RXD_OWN_XENA;
            }
            first_rxdp = rxdp;
        }
        ring->rx_bufs_left += 1;
        alloc_tab++;
    }

end:
    /* Transfer ownership of first descriptor to adapter just before
     * exiting. Before that, use memory barrier so that ownership
     * and other fields are seen by adapter correctly.
     */
    if (first_rxdp) {
        dma_wmb();
        first_rxdp->Control_1 |= RXD_OWN_XENA;
    }

    return SUCCESS;

pci_map_failed:
    swstats->pci_map_fail_cnt++;
    swstats->mem_freed += skb->truesize;
    dev_kfree_skb_irq(skb);
    return -ENOMEM;
}
2659 
/* Unmap and free every skb still attached to Rx block @blk of ring
 * @ring_no, zeroing the descriptors and updating the software stats.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
    struct net_device *dev = sp->dev;
    int j;
    struct sk_buff *skb;
    struct RxD_t *rxdp;
    struct RxD1 *rxdp1;
    struct RxD3 *rxdp3;
    struct mac_info *mac_control = &sp->mac_control;
    struct stat_block *stats = mac_control->stats_info;
    struct swStat *swstats = &stats->sw_stat;

    for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
        rxdp = mac_control->rings[ring_no].
            rx_blocks[blk].rxds[j].virt_addr;
        /* Host_Control holds the skb pointer set by fill_rx_buffers();
         * a descriptor without one has no buffer to release.
         */
        skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
        if (!skb)
            continue;
        if (sp->rxd_mode == RXD_MODE_1) {
            /* Single buffer mode: one mapping covers the frame. */
            rxdp1 = (struct RxD1 *)rxdp;
            dma_unmap_single(&sp->pdev->dev,
                     (dma_addr_t)rxdp1->Buffer0_ptr,
                     dev->mtu +
                     HEADER_ETHERNET_II_802_3_SIZE +
                     HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
                     DMA_FROM_DEVICE);
            memset(rxdp, 0, sizeof(struct RxD1));
        } else if (sp->rxd_mode == RXD_MODE_3B) {
            /* Two buffer mode: three separate mappings to undo. */
            rxdp3 = (struct RxD3 *)rxdp;
            dma_unmap_single(&sp->pdev->dev,
                     (dma_addr_t)rxdp3->Buffer0_ptr,
                     BUF0_LEN, DMA_FROM_DEVICE);
            dma_unmap_single(&sp->pdev->dev,
                     (dma_addr_t)rxdp3->Buffer1_ptr,
                     BUF1_LEN, DMA_FROM_DEVICE);
            dma_unmap_single(&sp->pdev->dev,
                     (dma_addr_t)rxdp3->Buffer2_ptr,
                     dev->mtu + 4, DMA_FROM_DEVICE);
            memset(rxdp, 0, sizeof(struct RxD3));
        }
        swstats->mem_freed += skb->truesize;
        dev_kfree_skb(skb);
        mac_control->rings[ring_no].rx_bufs_left -= 1;
    }
}
2705 
2706 /**
2707  *  free_rx_buffers - Frees all Rx buffers
2708  *  @sp: device private variable.
2709  *  Description:
2710  *  This function will free all Rx buffers allocated by host.
2711  *  Return Value:
2712  *  NONE.
2713  */
2714 
2715 static void free_rx_buffers(struct s2io_nic *sp)
2716 {
2717     struct net_device *dev = sp->dev;
2718     int i, blk = 0, buf_cnt = 0;
2719     struct config_param *config = &sp->config;
2720     struct mac_info *mac_control = &sp->mac_control;
2721 
2722     for (i = 0; i < config->rx_ring_num; i++) {
2723         struct ring_info *ring = &mac_control->rings[i];
2724 
2725         for (blk = 0; blk < rx_ring_sz[i]; blk++)
2726             free_rxd_blk(sp, i, blk);
2727 
2728         ring->rx_curr_put_info.block_index = 0;
2729         ring->rx_curr_get_info.block_index = 0;
2730         ring->rx_curr_put_info.offset = 0;
2731         ring->rx_curr_get_info.offset = 0;
2732         ring->rx_bufs_left = 0;
2733         DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2734               dev->name, buf_cnt, i);
2735     }
2736 }
2737 
2738 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2739 {
2740     if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2741         DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2742               ring->dev->name);
2743     }
2744     return 0;
2745 }
2746 
2747 /**
2748  * s2io_poll_msix - Rx interrupt handler for NAPI support
2749  * @napi : pointer to the napi structure.
2750  * @budget : The number of packets that were budgeted to be processed
2751  * during  one pass through the 'Poll" function.
2752  * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context,
 * and it will process only a given number of packets.
2756  * Return value:
2757  * 0 on success and 1 if there are No Rx packets to be processed.
2758  */
2759 
static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
    struct ring_info *ring = container_of(napi, struct ring_info, napi);
    struct net_device *dev = ring->dev;
    int pkts_processed = 0;
    u8 __iomem *addr = NULL;
    u8 val8 = 0;
    struct s2io_nic *nic = netdev_priv(dev);
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    int budget_org = budget;

    if (unlikely(!is_s2io_card_up(nic)))
        return 0;

    /* Process received frames on this ring, then refill its buffers. */
    pkts_processed = rx_intr_handler(ring, budget);
    s2io_chk_rx_buffers(nic, ring);

    if (pkts_processed < budget_org) {
        /* Budget not exhausted: leave polling mode and re-arm
         * this ring's MSI-X Rx vector.
         */
        napi_complete_done(napi, pkts_processed);
        /*Re Enable MSI-Rx Vector*/
        addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
        addr += 7 - ring->ring_no;
        /* NOTE(review): mask value differs for ring 0 (0x3f) vs the
         * other rings (0xbf) - presumably hardware-defined; confirm
         * against the Xframe register spec.
         */
        val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
        writeb(val8, addr);
        /* Read back to flush the posted write. */
        val8 = readb(addr);
    }
    return pkts_processed;
}
2788 
/* NAPI poll handler for INTA (shared interrupt) mode: one NAPI context
 * services all Rx rings, carrying the remaining budget ring to ring.
 */
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
    struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
    int pkts_processed = 0;
    int ring_pkts_processed, i;
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    int budget_org = budget;
    struct config_param *config = &nic->config;
    struct mac_info *mac_control = &nic->mac_control;

    if (unlikely(!is_s2io_card_up(nic)))
        return 0;

    for (i = 0; i < config->rx_ring_num; i++) {
        struct ring_info *ring = &mac_control->rings[i];
        ring_pkts_processed = rx_intr_handler(ring, budget);
        s2io_chk_rx_buffers(nic, ring);
        pkts_processed += ring_pkts_processed;
        budget -= ring_pkts_processed;
        if (budget <= 0)
            break;
    }
    if (pkts_processed < budget_org) {
        napi_complete_done(napi, pkts_processed);
        /* Re enable the Rx interrupts for the ring */
        writeq(0, &bar0->rx_traffic_mask);
        /* Read back to flush the posted write. */
        readl(&bar0->rx_traffic_mask);
    }
    return pkts_processed;
}
2819 
2820 #ifdef CONFIG_NET_POLL_CONTROLLER
2821 /**
2822  * s2io_netpoll - netpoll event handler entry point
2823  * @dev : pointer to the device structure.
2824  * Description:
2825  *  This function will be called by upper layer to check for events on the
2826  * interface in situations where interrupts are disabled. It is used for
2827  * specific in-kernel networking tasks, such as remote consoles and kernel
2828  * debugging over the network (example netdump in RedHat).
2829  */
static void s2io_netpoll(struct net_device *dev)
{
    struct s2io_nic *nic = netdev_priv(dev);
    const int irq = nic->pdev->irq;
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
    int i;
    struct config_param *config = &nic->config;
    struct mac_info *mac_control = &nic->mac_control;

    if (pci_channel_offline(nic->pdev))
        return;

    /* Run with the device IRQ masked; everything below is polled. */
    disable_irq(irq);

    /* Write all-ones to clear any pending Rx/Tx interrupt bits. */
    writeq(val64, &bar0->rx_traffic_int);
    writeq(val64, &bar0->tx_traffic_int);

    /* we need to free up the transmitted skbufs or else netpoll will
     * run out of skbs and will fail and eventually netpoll application such
     * as netdump will fail.
     */
    for (i = 0; i < config->tx_fifo_num; i++)
        tx_intr_handler(&mac_control->fifos[i]);

    /* check for received packet and indicate up to network */
    for (i = 0; i < config->rx_ring_num; i++) {
        struct ring_info *ring = &mac_control->rings[i];

        rx_intr_handler(ring, 0);
    }

    /* Refill the Rx rings drained above. */
    for (i = 0; i < config->rx_ring_num; i++) {
        struct ring_info *ring = &mac_control->rings[i];

        if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
            DBG_PRINT(INFO_DBG,
                  "%s: Out of memory in Rx Netpoll!!\n",
                  dev->name);
            break;
        }
    }
    enable_irq(irq);
}
2874 #endif
2875 
2876 /**
2877  *  rx_intr_handler - Rx interrupt handler
2878  *  @ring_data: per ring structure.
2879  *  @budget: budget for napi processing.
2880  *  Description:
2881  *  If the interrupt is because of a received frame or if the
2882  *  receive ring contains fresh as yet un-processed frames,this function is
2883  *  called. It picks out the RxD at which place the last Rx processing had
2884  *  stopped and sends the skb to the OSM's Rx handler and then increments
2885  *  the offset.
2886  *  Return Value:
2887  *  No. of napi packets processed.
2888  */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
    int get_block, put_block;
    struct rx_curr_get_info get_info, put_info;
    struct RxD_t *rxdp;
    struct sk_buff *skb;
    int pkt_cnt = 0, napi_pkts = 0;
    int i;
    struct RxD1 *rxdp1;
    struct RxD3 *rxdp3;

    if (budget <= 0)
        return napi_pkts;

    /* Work on local copies of the get/put cursors; the ring's own
     * rx_curr_get_info is updated as descriptors are consumed.
     */
    get_info = ring_data->rx_curr_get_info;
    get_block = get_info.block_index;
    memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
    put_block = put_info.block_index;
    rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

    /* Walk descriptors for as long as the NIC has marked them done. */
    while (RXD_IS_UP2DT(rxdp)) {
        /*
         * If your are next to put index then it's
         * FIFO full condition
         */
        if ((get_block == put_block) &&
            (get_info.offset + 1) == put_info.offset) {
            DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
                  ring_data->dev->name);
            break;
        }
        skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
        if (skb == NULL) {
            DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
                  ring_data->dev->name);
            return 0;
        }
        if (ring_data->rxd_mode == RXD_MODE_1) {
            rxdp1 = (struct RxD1 *)rxdp;
            dma_unmap_single(&ring_data->pdev->dev,
                     (dma_addr_t)rxdp1->Buffer0_ptr,
                     ring_data->mtu +
                     HEADER_ETHERNET_II_802_3_SIZE +
                     HEADER_802_2_SIZE +
                     HEADER_SNAP_SIZE,
                     DMA_FROM_DEVICE);
        } else if (ring_data->rxd_mode == RXD_MODE_3B) {
            rxdp3 = (struct RxD3 *)rxdp;
            /* Buffer0 stays mapped across refills - only sync it
             * for CPU access; Buffer2 is unmapped for good.
             */
            dma_sync_single_for_cpu(&ring_data->pdev->dev,
                        (dma_addr_t)rxdp3->Buffer0_ptr,
                        BUF0_LEN, DMA_FROM_DEVICE);
            dma_unmap_single(&ring_data->pdev->dev,
                     (dma_addr_t)rxdp3->Buffer2_ptr,
                     ring_data->mtu + 4, DMA_FROM_DEVICE);
        }
        prefetch(skb->data);
        /* Hand the frame to the upper-layer receive path. */
        rx_osm_handler(ring_data, rxdp);
        get_info.offset++;
        ring_data->rx_curr_get_info.offset = get_info.offset;
        rxdp = ring_data->rx_blocks[get_block].
            rxds[get_info.offset].virt_addr;
        /* End of the block: wrap to the next one (cycling at
         * block_count) and restart at offset 0.
         */
        if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
            get_info.offset = 0;
            ring_data->rx_curr_get_info.offset = get_info.offset;
            get_block++;
            if (get_block == ring_data->block_count)
                get_block = 0;
            ring_data->rx_curr_get_info.block_index = get_block;
            rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
        }

        /* Only enforce the budget when running under NAPI. */
        if (ring_data->nic->config.napi) {
            budget--;
            napi_pkts++;
            if (!budget)
                break;
        }
        pkt_cnt++;
        if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
            break;
    }
    if (ring_data->lro) {
        /* Clear all LRO sessions before exiting */
        for (i = 0; i < MAX_LRO_SESSIONS; i++) {
            struct lro *lro = &ring_data->lro0_n[i];
            if (lro->in_use) {
                /* Flush the aggregated frame upstream. */
                update_L3L4_header(ring_data->nic, lro);
                queue_rx_frame(lro->parent, lro->vlan_tag);
                clear_lro_session(lro);
            }
        }
    }
    return napi_pkts;
}
2983 
2984 /**
2985  *  tx_intr_handler - Transmit interrupt handler
2986  *  @fifo_data : fifo data pointer
2987  *  Description:
2988  *  If an interrupt was raised to indicate DMA complete of the
2989  *  Tx packet, this function is called. It identifies the last TxD
2990  *  whose buffer was freed and frees all skbs whose data have already
2991  *  DMA'ed into the NICs internal memory.
2992  *  Return Value:
2993  *  NONE
2994  */
2995 
static void tx_intr_handler(struct fifo_info *fifo_data)
{
    struct s2io_nic *nic = fifo_data->nic;
    struct tx_curr_get_info get_info, put_info;
    struct sk_buff *skb = NULL;
    struct TxD *txdlp;
    int pkt_cnt = 0;
    unsigned long flags = 0;
    u8 err_mask;
    struct stat_block *stats = nic->mac_control.stats_info;
    struct swStat *swstats = &stats->sw_stat;

    /* Bail out if another context already holds the fifo lock. */
    if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
        return;

    get_info = fifo_data->tx_curr_get_info;
    memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
    txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
    /* Reclaim descriptors the NIC no longer owns, up to the put
     * cursor; Host_Control non-zero means an skb is attached.
     */
    while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
           (get_info.offset != put_info.offset) &&
           (txdlp->Host_Control)) {
        /* Check for TxD errors */
        if (txdlp->Control_1 & TXD_T_CODE) {
            unsigned long long err;
            err = txdlp->Control_1 & TXD_T_CODE;
            if (err & 0x1) {
                swstats->parity_err_cnt++;
            }

            /* update t_code statistics */
            err_mask = err >> 48;
            switch (err_mask) {
            case 2:
                swstats->tx_buf_abort_cnt++;
                break;

            case 3:
                swstats->tx_desc_abort_cnt++;
                break;

            case 7:
                swstats->tx_parity_err_cnt++;
                break;

            case 10:
                swstats->tx_link_loss_cnt++;
                break;

            case 15:
                swstats->tx_list_proc_err_cnt++;
                break;
            }
        }

        skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
        if (skb == NULL) {
            spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
            DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
                  __func__);
            return;
        }
        pkt_cnt++;

        /* Updating the statistics block */
        swstats->mem_freed += skb->truesize;
        dev_consume_skb_irq(skb);

        /* Advance the get cursor, wrapping at fifo_len + 1. */
        get_info.offset++;
        if (get_info.offset == get_info.fifo_len + 1)
            get_info.offset = 0;
        txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
        fifo_data->tx_curr_get_info.offset = get_info.offset;
    }

    /* Reclaimed descriptors may allow the queue to be restarted. */
    s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

    spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3074 
3075 /**
3076  *  s2io_mdio_write - Function to write in to MDIO registers
3077  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3078  *  @addr     : address value
3079  *  @value    : data value
3080  *  @dev      : pointer to net_device structure
3081  *  Description:
3082  *  This function is used to write values to the MDIO registers
3083  *  NONE
3084  */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
                struct net_device *dev)
{
    u64 val64;
    struct s2io_nic *sp = netdev_priv(dev);
    struct XENA_dev_config __iomem *bar0 = sp->bar0;

    /* address transaction */
    val64 = MDIO_MMD_INDX_ADDR(addr) |
        MDIO_MMD_DEV_ADDR(mmd_type) |
        MDIO_MMS_PRT_ADDR(0x0);
    writeq(val64, &bar0->mdio_control);
    /* Each phase is latched first, then kicked off via START_TRANS. */
    val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
    writeq(val64, &bar0->mdio_control);
    udelay(100);

    /* Data transaction */
    val64 = MDIO_MMD_INDX_ADDR(addr) |
        MDIO_MMD_DEV_ADDR(mmd_type) |
        MDIO_MMS_PRT_ADDR(0x0) |
        MDIO_MDIO_DATA(value) |
        MDIO_OP(MDIO_OP_WRITE_TRANS);
    writeq(val64, &bar0->mdio_control);
    val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
    writeq(val64, &bar0->mdio_control);
    udelay(100);

    /* Trailing read transaction whose result is discarded -
     * presumably a read-back to complete the write cycle;
     * TODO confirm against the hardware spec.
     */
    val64 = MDIO_MMD_INDX_ADDR(addr) |
        MDIO_MMD_DEV_ADDR(mmd_type) |
        MDIO_MMS_PRT_ADDR(0x0) |
        MDIO_OP(MDIO_OP_READ_TRANS);
    writeq(val64, &bar0->mdio_control);
    val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
    writeq(val64, &bar0->mdio_control);
    udelay(100);
}
3121 
3122 /**
 *  s2io_mdio_read - Function to read from MDIO registers
3124  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3125  *  @addr     : address value
3126  *  @dev      : pointer to net_device structure
3127  *  Description:
 *  This function is used to read values from the MDIO registers
3129  *  NONE
3130  */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
    u64 val64 = 0x0;
    u64 rval64 = 0x0;
    struct s2io_nic *sp = netdev_priv(dev);
    struct XENA_dev_config __iomem *bar0 = sp->bar0;

    /* address transaction */
    val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
             | MDIO_MMD_DEV_ADDR(mmd_type)
             | MDIO_MMS_PRT_ADDR(0x0));
    writeq(val64, &bar0->mdio_control);
    /* Each phase is latched first, then kicked off via START_TRANS. */
    val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
    writeq(val64, &bar0->mdio_control);
    udelay(100);

    /* Data transaction */
    val64 = MDIO_MMD_INDX_ADDR(addr) |
        MDIO_MMD_DEV_ADDR(mmd_type) |
        MDIO_MMS_PRT_ADDR(0x0) |
        MDIO_OP(MDIO_OP_READ_TRANS);
    writeq(val64, &bar0->mdio_control);
    val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
    writeq(val64, &bar0->mdio_control);
    udelay(100);

    /* Read the value from regs */
    /* The 16-bit result sits in bits 31:16 of mdio_control. */
    rval64 = readq(&bar0->mdio_control);
    rval64 = rval64 & 0xFFFF0000;
    rval64 = rval64 >> 16;
    return rval64;
}
3163 
3164 /**
3165  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3166  *  @counter      : counter value to be updated
3167  *  @regs_stat    : registers status
3168  *  @index        : index
3169  *  @flag         : flag to indicate the status
3170  *  @type         : counter type
3171  *  Description:
3172  *  This function is to check the status of the xpak counters value
3173  *  NONE
3174  */
3175 
3176 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3177                   u16 flag, u16 type)
3178 {
3179     u64 mask = 0x3;
3180     u64 val64;
3181     int i;
3182     for (i = 0; i < index; i++)
3183         mask = mask << 0x2;
3184 
3185     if (flag > 0) {
3186         *counter = *counter + 1;
3187         val64 = *regs_stat & mask;
3188         val64 = val64 >> (index * 0x2);
3189         val64 = val64 + 1;
3190         if (val64 == 3) {
3191             switch (type) {
3192             case 1:
3193                 DBG_PRINT(ERR_DBG,
3194                       "Take Xframe NIC out of service.\n");
3195                 DBG_PRINT(ERR_DBG,
3196 "Excessive temperatures may result in premature transceiver failure.\n");
3197                 break;
3198             case 2:
3199                 DBG_PRINT(ERR_DBG,
3200                       "Take Xframe NIC out of service.\n");
3201                 DBG_PRINT(ERR_DBG,
3202 "Excessive bias currents may indicate imminent laser diode failure.\n");
3203                 break;
3204             case 3:
3205                 DBG_PRINT(ERR_DBG,
3206                       "Take Xframe NIC out of service.\n");
3207                 DBG_PRINT(ERR_DBG,
3208 "Excessive laser output power may saturate far-end receiver.\n");
3209                 break;
3210             default:
3211                 DBG_PRINT(ERR_DBG,
3212                       "Incorrect XPAK Alarm type\n");
3213             }
3214             val64 = 0x0;
3215         }
3216         val64 = val64 << (index * 0x2);
3217         *regs_stat = (*regs_stat & (~mask)) | (val64);
3218 
3219     } else {
3220         *regs_stat = *regs_stat & (~mask);
3221     }
3222 }
3223 
3224 /**
3225  *  s2io_updt_xpak_counter - Function to update the xpak counters
3226  *  @dev         : pointer to net_device struct
3227  *  Description:
 *  This function is to update the status of the xpak counters value
3229  *  NONE
3230  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
    u16 flag  = 0x0;
    u16 type  = 0x0;
    u16 val16 = 0x0;
    u64 val64 = 0x0;
    u64 addr  = 0x0;

    struct s2io_nic *sp = netdev_priv(dev);
    struct stat_block *stats = sp->mac_control.stats_info;
    struct xpakStat *xstats = &stats->xpak_stat;

    /* Check the communication with the MDIO slave */
    addr = MDIO_CTRL1;
    val64 = 0x0;
    val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
    /* All-ones or all-zeroes reads indicate a dead or absent slave. */
    if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
        DBG_PRINT(ERR_DBG,
              "ERR: MDIO slave access failed - Returned %llx\n",
              (unsigned long long)val64);
        return;
    }

    /* Check for the expected value of control reg 1 */
    if (val64 != MDIO_CTRL1_SPEED10G) {
        DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
              "Returned: %llx- Expected: 0x%x\n",
              (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
        return;
    }

    /* Loading the DOM register to MDIO register */
    addr = 0xA100;
    s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
    val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

    /* Reading the Alarm flags */
    addr = 0xA070;
    val64 = 0x0;
    val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

    /* Bit 7: transceiver temperature high alarm (type 1). */
    flag = CHECKBIT(val64, 0x7);
    type = 1;
    s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
                  &xstats->xpak_regs_stat,
                  0x0, flag, type);

    if (CHECKBIT(val64, 0x6))
        xstats->alarm_transceiver_temp_low++;

    /* Bit 3: laser bias current high alarm (type 2). */
    flag = CHECKBIT(val64, 0x3);
    type = 2;
    s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
                  &xstats->xpak_regs_stat,
                  0x2, flag, type);

    if (CHECKBIT(val64, 0x2))
        xstats->alarm_laser_bias_current_low++;

    /* Bit 1: laser output power high alarm (type 3). */
    flag = CHECKBIT(val64, 0x1);
    type = 3;
    s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
                  &xstats->xpak_regs_stat,
                  0x4, flag, type);

    if (CHECKBIT(val64, 0x0))
        xstats->alarm_laser_output_power_low++;

    /* Reading the Warning flags */
    addr = 0xA074;
    val64 = 0x0;
    val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

    if (CHECKBIT(val64, 0x7))
        xstats->warn_transceiver_temp_high++;

    if (CHECKBIT(val64, 0x6))
        xstats->warn_transceiver_temp_low++;

    if (CHECKBIT(val64, 0x3))
        xstats->warn_laser_bias_current_high++;

    if (CHECKBIT(val64, 0x2))
        xstats->warn_laser_bias_current_low++;

    if (CHECKBIT(val64, 0x1))
        xstats->warn_laser_output_power_high++;

    if (CHECKBIT(val64, 0x0))
        xstats->warn_laser_output_power_low++;
}
3322 
3323 /**
3324  *  wait_for_cmd_complete - waits for a command to complete.
3325  *  @addr: address
3326  *  @busy_bit: bit to check for busy
3327  *  @bit_state: state to check
3328  *  @may_sleep: parameter indicates if sleeping when waiting for
3329  *  command complete
3330  *  Description: Function that waits for a command to Write into RMAC
3331  *  ADDR DATA registers to be completed and returns either success or
3332  *  error depending on whether the command was complete or not.
3333  *  Return value:
3334  *   SUCCESS on success and FAILURE on failure.
3335  */
3336 
3337 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3338                  int bit_state, bool may_sleep)
3339 {
3340     int ret = FAILURE, cnt = 0, delay = 1;
3341     u64 val64;
3342 
3343     if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3344         return FAILURE;
3345 
3346     do {
3347         val64 = readq(addr);
3348         if (bit_state == S2IO_BIT_RESET) {
3349             if (!(val64 & busy_bit)) {
3350                 ret = SUCCESS;
3351                 break;
3352             }
3353         } else {
3354             if (val64 & busy_bit) {
3355                 ret = SUCCESS;
3356                 break;
3357             }
3358         }
3359 
3360         if (!may_sleep)
3361             mdelay(delay);
3362         else
3363             msleep(delay);
3364 
3365         if (++cnt >= 10)
3366             delay = 50;
3367     } while (cnt < 20);
3368     return ret;
3369 }
3370 /**
3371  * check_pci_device_id - Checks if the device id is supported
3372  * @id : device id
3373  * Description: Function to check if the pci device id is supported by driver.
3374  * Return value: Actual device id if supported else PCI_ANY_ID
3375  */
3376 static u16 check_pci_device_id(u16 id)
3377 {
3378     switch (id) {
3379     case PCI_DEVICE_ID_HERC_WIN:
3380     case PCI_DEVICE_ID_HERC_UNI:
3381         return XFRAME_II_DEVICE;
3382     case PCI_DEVICE_ID_S2IO_UNI:
3383     case PCI_DEVICE_ID_S2IO_WIN:
3384         return XFRAME_I_DEVICE;
3385     default:
3386         return PCI_ANY_ID;
3387     }
3388 }
3389 
3390 /**
3391  *  s2io_reset - Resets the card.
3392  *  @sp : private member of the device structure.
3393  *  Description: Function to Reset the card. This function then also
3394  *  restores the previously saved PCI configuration space registers as
3395  *  the card reset also resets the configuration space.
3396  *  Return value:
3397  *  void.
3398  */
3399 
static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue the global software reset. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards get extra settle time after the reset
	 * (NOTE(review): delay values look hardware-mandated — confirm
	 * before changing).
	 */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		/* Config offset 0x2 is the PCI device ID; it only reads
		 * back a supported value once the card is out of reset.
		 */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* Put back the PCI-X command register saved above. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* Zero the whole statistics block, then put back the counters
	 * that must survive a reset.
	 */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Magic register at BAR0+0x2700 — NOTE(review): no named
		 * field for this offset; presumably LED control. Confirm
		 * against hardware documentation before changing.
		 */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Read the pending bits and write them back to ack them
		 * (NOTE(review): looks like write-1-to-clear — confirm).
		 */
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3512 
3513 /**
 *  s2io_set_swapper - to set the swapper control on the card
3515  *  @sp : private member of the device structure,
3516  *  pointer to the s2io_nic structure.
3517  *  Description: Function to set the swapper control on the card
3518  *  correctly depending on the 'endianness' of the system.
3519  *  Return value:
3520  *  SUCCESS on success and FAILURE on failure.
3521  */
3522 
static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Candidate read-path swapper settings, tried in order
		 * until the feedback register reads back correctly.
		 */
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		/* Remember the read-path setting that worked; it is OR'd
		 * into the write-path probes below.
		 */
		valr = value[i];
	} else {
		/* Reads already work; keep the current control value. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now probe the write path: write a known pattern to the XMSI
	 * address register and check it reads back unchanged.
	 */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		int i = 0;
		/* Write-path swapper candidates, same FE/SE order. */
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the top 16 bits of the probed control value and
	 * rebuild the rest from the explicit flag set below.
	 */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3654 
3655 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3656 {
3657     struct XENA_dev_config __iomem *bar0 = nic->bar0;
3658     u64 val64;
3659     int ret = 0, cnt = 0;
3660 
3661     do {
3662         val64 = readq(&bar0->xmsi_access);
3663         if (!(val64 & s2BIT(15)))
3664             break;
3665         mdelay(1);
3666         cnt++;
3667     } while (cnt < 5);
3668     if (cnt == 5) {
3669         DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3670         ret = 1;
3671     }
3672 
3673     return ret;
3674 }
3675 
3676 static void restore_xmsi_data(struct s2io_nic *nic)
3677 {
3678     struct XENA_dev_config __iomem *bar0 = nic->bar0;
3679     u64 val64;
3680     int i, msix_index;
3681 
3682     if (nic->device_type == XFRAME_I_DEVICE)
3683         return;
3684 
3685     for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3686         msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3687         writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3688         writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3689         val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3690         writeq(val64, &bar0->xmsi_access);
3691         if (wait_for_msix_trans(nic, msix_index))
3692             DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3693                   __func__, msix_index);
3694     }
3695 }
3696 
3697 static void store_xmsi_data(struct s2io_nic *nic)
3698 {
3699     struct XENA_dev_config __iomem *bar0 = nic->bar0;
3700     u64 val64, addr, data;
3701     int i, msix_index;
3702 
3703     if (nic->device_type == XFRAME_I_DEVICE)
3704         return;
3705 
3706     /* Store and display */
3707     for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3708         msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3709         val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3710         writeq(val64, &bar0->xmsi_access);
3711         if (wait_for_msix_trans(nic, msix_index)) {
3712             DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3713                   __func__, msix_index);
3714             continue;
3715         }
3716         addr = readq(&bar0->xmsi_address);
3717         data = readq(&bar0->xmsi_data);
3718         if (addr && data) {
3719             nic->msix_info[i].addr = addr;
3720             nic->msix_info[i].data = data;
3721         }
3722     }
3723 }
3724 
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Vector table handed to pci_enable_msix_range(). */
	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Driver-private bookkeeping array parallel to 'entries'. */
	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Entry 0 is the alarm vector; its arg is the fifo array. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries use the sparse hardware numbering
	 * (i-1)*8 + 1; they are bound to rx rings below.
	 */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Route each rx ring's interrupt to its MSI-X index and attach
	 * the ring as the handler argument.
	 */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	/* Read back; presumably flushes the posted write — confirm. */
	readq(&bar0->rx_mat);

	/* Exact allocation: min == max == num_entries, so partial
	 * vector grants are treated as failure.
	 */
	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3808 
3809 /* Handle software interrupt used during MSI(X) test */
3810 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3811 {
3812     struct s2io_nic *sp = dev_id;
3813 
3814     sp->msi_detected = 1;
3815     wake_up(&sp->msi_wait);
3816 
3817     return IRQ_HANDLED;
3818 }
3819 
3820 /* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the test handler onto MSI-X vector 1. */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Save the scheduled-interrupt control register, then program
	 * it to fire once and route the interrupt to MSI number 1,
	 * forcing a software-generated interrupt.
	 */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to HZ/10 (100ms) to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the register to its pre-test value. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3862 
3863 static void remove_msix_isr(struct s2io_nic *sp)
3864 {
3865     int i;
3866     u16 msi_control;
3867 
3868     for (i = 0; i < sp->num_entries; i++) {
3869         if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3870             int vector = sp->entries[i].vector;
3871             void *arg = sp->s2io_entries[i].arg;
3872             free_irq(vector, arg);
3873         }
3874     }
3875 
3876     kfree(sp->entries);
3877     kfree(sp->s2io_entries);
3878     sp->entries = NULL;
3879     sp->s2io_entries = NULL;
3880 
3881     pci_read_config_word(sp->pdev, 0x42, &msi_control);
3882     msi_control &= 0xFFFE; /* Disable MSI */
3883     pci_write_config_word(sp->pdev, 0x42, msi_control);
3884 
3885     pci_disable_msix(sp->pdev);
3886 }
3887 
/* Release the legacy INTA interrupt line registered for this device. */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3892 
3893 /* ********************************************************* *
3894  * Functions defined below concern the OS part of the driver *
3895  * ********************************************************* */
3896 
3897 /**
3898  *  s2io_open - open entry point of the driver
3899  *  @dev : pointer to the device structure.
3900  *  Description:
3901  *  This function is the open entry point of the driver. It mainly calls a
3902  *  function to allocate Rx buffers and inserts them into the buffer
3903  *  descriptors and then enables the Rx part of the NIC.
3904  *  Return value:
3905  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3906  *   file on failure.
3907  */
3908 
3909 static int s2io_open(struct net_device *dev)
3910 {
3911     struct s2io_nic *sp = netdev_priv(dev);
3912     struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3913     int err = 0;
3914 
3915     /*
3916      * Make sure you have link off by default every time
3917      * Nic is initialized
3918      */
3919     netif_carrier_off(dev);
3920     sp->last_link_state = 0;
3921 
3922     /* Initialize H/W and enable interrupts */
3923     err = s2io_card_up(sp);
3924     if (err) {
3925         DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3926               dev->name);
3927         goto hw_init_failed;
3928     }
3929 
3930     if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3931         DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3932         s2io_card_down(sp);
3933         err = -ENODEV;
3934         goto hw_init_failed;
3935     }
3936     s2io_start_all_tx_queue(sp);
3937     return 0;
3938 
3939 hw_init_failed:
3940     if (sp->config.intr_type == MSI_X) {
3941         if (sp->entries) {
3942             kfree(sp->entries);
3943             swstats->mem_freed += sp->num_entries *
3944                 sizeof(struct msix_entry);
3945         }
3946         if (sp->s2io_entries) {
3947             kfree(sp->s2io_entries);
3948             swstats->mem_freed += sp->num_entries *
3949                 sizeof(struct s2io_msix_entry);
3950         }
3951     }
3952     return err;
3953 }
3954 
3955 /**
3956  *  s2io_close -close entry point of the driver
3957  *  @dev : device pointer.
3958  *  Description:
3959  *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
3962  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3963  *  Return value:
3964  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3965  *  file on failure.
3966  */
3967 
3968 static int s2io_close(struct net_device *dev)
3969 {
3970     struct s2io_nic *sp = netdev_priv(dev);
3971     struct config_param *config = &sp->config;
3972     u64 tmp64;
3973     int offset;
3974 
3975     /* Return if the device is already closed               *
3976      *  Can happen when s2io_card_up failed in change_mtu    *
3977      */
3978     if (!is_s2io_card_up(sp))
3979         return 0;
3980 
3981     s2io_stop_all_tx_queue(sp);
3982     /* delete all populated mac entries */
3983     for (offset = 1; offset < config->max_mc_addr; offset++) {
3984         tmp64 = do_s2io_read_unicast_mc(sp, offset);
3985         if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3986             do_s2io_delete_unicast_mc(sp, tmp64);
3987     }
3988 
3989     s2io_card_down(sp);
3990 
3991     return 0;
3992 }
3993 
3994 /**
 *  s2io_xmit - Tx entry point of the driver
3996  *  @skb : the socket buffer containing the Tx data.
3997  *  @dev : device pointer.
3998  *  Description :
3999  *  This function is the Tx entry point of the driver. S2IO NIC supports
4000  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the packet, just the trans_start
 *  variable will not be updated.
4003  *  Return value:
4004  *  0 on success & 1 on failure.
4005  */
4006 
4007 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4008 {
4009     struct s2io_nic *sp = netdev_priv(dev);
4010     u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4011     register u64 val64;
4012     struct TxD *txdp;
4013     struct TxFIFO_element __iomem *tx_fifo;
4014     unsigned long flags = 0;
4015     u16 vlan_tag = 0;
4016     struct fifo_info *fifo = NULL;
4017     int offload_type;
4018     int enable_per_list_interrupt = 0;
4019     struct config_param *config = &sp->config;
4020     struct mac_info *mac_control = &sp->mac_control;
4021     struct stat_block *stats = mac_control->stats_info;
4022     struct swStat *swstats = &stats->sw_stat;
4023 
4024     DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4025 
4026     if (unlikely(skb->len <= 0)) {
4027         DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4028         dev_kfree_skb_any(skb);
4029         return NETDEV_TX_OK;
4030     }
4031 
4032     if (!is_s2io_card_up(sp)) {
4033         DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4034               dev->name);
4035         dev_kfree_skb_any(skb);
4036         return NETDEV_TX_OK;
4037     }
4038 
4039     queue = 0;
4040     if (skb_vlan_tag_present(skb))
4041         vlan_tag = skb_vlan_tag_get(skb);
4042     if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4043         if (skb->protocol == htons(ETH_P_IP)) {
4044             struct iphdr *ip;
4045             struct tcphdr *th;
4046             ip = ip_hdr(skb);
4047 
4048             if (!ip_is_fragment(ip)) {
4049                 th = (struct tcphdr *)(((unsigned char *)ip) +
4050                                ip->ihl*4);
4051 
4052                 if (ip->protocol == IPPROTO_TCP) {
4053                     queue_len = sp->total_tcp_fifos;
4054                     queue = (ntohs(th->source) +
4055                          ntohs(th->dest)) &
4056                         sp->fifo_selector[queue_len - 1];
4057                     if (queue >= queue_len)
4058                         queue = queue_len - 1;
4059                 } else if (ip->protocol == IPPROTO_UDP) {
4060                     queue_len = sp->total_udp_fifos;
4061                     queue = (ntohs(th->source) +
4062                          ntohs(th->dest)) &
4063                         sp->fifo_selector[queue_len - 1];
4064                     if (queue >= queue_len)
4065                         queue = queue_len - 1;
4066                     queue += sp->udp_fifo_idx;
4067                     if (skb->len > 1024)
4068                         enable_per_list_interrupt = 1;
4069                 }
4070             }
4071         }
4072     } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4073         /* get fifo number based on skb->priority value */
4074         queue = config->fifo_mapping
4075             [skb->priority & (MAX_TX_FIFOS - 1)];
4076     fifo = &mac_control->fifos[queue];
4077 
4078     spin_lock_irqsave(&fifo->tx_lock, flags);
4079 
4080     if (sp->config.multiq) {
4081         if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4082             spin_unlock_irqrestore(&fifo->tx_lock, flags);
4083             return NETDEV_TX_BUSY;
4084         }
4085     } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4086         if (netif_queue_stopped(dev)) {
4087             spin_unlock_irqrestore(&fifo->tx_lock, flags);
4088             return NETDEV_TX_BUSY;
4089         }
4090     }
4091 
4092     put_off = (u16)fifo->tx_curr_put_info.offset;
4093     get_off = (u16)fifo->tx_curr_get_info.offset;
4094     txdp = fifo->list_info[put_off].list_virt_addr;
4095 
4096     queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4097     /* Avoid "put" pointer going beyond "get" pointer */
4098     if (txdp->Host_Control ||
4099         ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4100         DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4101         s2io_stop_tx_queue(sp, fifo->fifo_no);
4102         dev_kfree_skb_any(skb);
4103         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4104         return NETDEV_TX_OK;
4105     }
4106 
4107     offload_type = s2io_offload_type(skb);
4108     if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4109         txdp->Control_1 |= TXD_TCP_LSO_EN;
4110         txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4111     }
4112     if (skb->ip_summed == CHECKSUM_PARTIAL) {
4113         txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4114                     TXD_TX_CKO_TCP_EN |
4115                     TXD_TX_CKO_UDP_EN);
4116     }
4117     txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4118     txdp->Control_1 |= TXD_LIST_OWN_XENA;
4119     txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4120     if (enable_per_list_interrupt)
4121         if (put_off & (queue_len >> 5))
4122             txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4123     if (vlan_tag) {
4124         txdp->Control_2 |= TXD_VLAN_ENABLE;
4125         txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4126     }
4127 
4128     frg_len = skb_headlen(skb);
4129     txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
4130                           frg_len, DMA_TO_DEVICE);
4131     if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
4132         goto pci_map_failed;
4133 
4134     txdp->Host_Control = (unsigned long)skb;
4135     txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4136 
4137     frg_cnt = skb_shinfo(skb)->nr_frags;
4138     /* For fragmented SKB. */
4139     for (i = 0; i < frg_cnt; i++) {
4140         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4141         /* A '0' length fragment will be ignored */
4142         if (!skb_frag_size(frag))
4143             continue;
4144         txdp++;
4145         txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4146                                  frag, 0,
4147                                  skb_frag_size(frag),
4148                                  DMA_TO_DEVICE);
4149         txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4150     }
4151     txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4152 
4153     tx_fifo = mac_control->tx_FIFO_start[queue];
4154     val64 = fifo->list_info[put_off].list_phy_addr;
4155     writeq(val64, &tx_fifo->TxDL_Pointer);
4156 
4157     val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4158          TX_FIFO_LAST_LIST);
4159     if (offload_type)
4160         val64 |= TX_FIFO_SPECIAL_FUNC;
4161 
4162     writeq(val64, &tx_fifo->List_Control);
4163 
4164     put_off++;
4165     if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4166         put_off = 0;
4167     fifo->tx_curr_put_info.offset = put_off;
4168 
4169     /* Avoid "put" pointer going beyond "get" pointer */
4170     if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4171         swstats->fifo_full_cnt++;
4172         DBG_PRINT(TX_DBG,
4173               "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4174               put_off, get_off);
4175         s2io_stop_tx_queue(sp, fifo->fifo_no);
4176     }
4177     swstats->mem_allocated += skb->truesize;
4178     spin_unlock_irqrestore(&fifo->tx_lock, flags);
4179 
4180     if (sp->config.intr_type == MSI_X)
4181         tx_intr_handler(fifo);
4182 
4183     return NETDEV_TX_OK;
4184 
4185 pci_map_failed:
4186     swstats->pci_map_fail_cnt++;
4187     s2io_stop_tx_queue(sp, fifo->fifo_no);
4188     swstats->mem_freed += skb->truesize;
4189     dev_kfree_skb_any(skb);
4190     spin_unlock_irqrestore(&fifo->tx_lock, flags);
4191     return NETDEV_TX_OK;
4192 }
4193 
4194 static void
4195 s2io_alarm_handle(struct timer_list *t)
4196 {
4197     struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4198     struct net_device *dev = sp->dev;
4199 
4200     s2io_handle_errors(dev);
4201     mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4202 }
4203 
4204 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4205 {
4206     struct ring_info *ring = (struct ring_info *)dev_id;
4207     struct s2io_nic *sp = ring->nic;
4208     struct XENA_dev_config __iomem *bar0 = sp->bar0;
4209 
4210     if (unlikely(!is_s2io_card_up(sp)))
4211         return IRQ_HANDLED;
4212 
4213     if (sp->config.napi) {
4214         u8 __iomem *addr = NULL;
4215         u8 val8 = 0;
4216 
4217         addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4218         addr += (7 - ring->ring_no);
4219         val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4220         writeb(val8, addr);
4221         val8 = readb(addr);
4222         napi_schedule(&ring->napi);
4223     } else {
4224         rx_intr_handler(ring, 0);
4225         s2io_chk_rx_buffers(sp, ring);
4226     }
4227 
4228     return IRQ_HANDLED;
4229 }
4230 
/*
 * s2io_msix_fifo_handle - MSI-X interrupt handler for the Tx fifos.
 * @irq: interrupt number (unused).
 * @dev_id: pointer to the first element of the fifo_info array.
 *
 * Services Tx-PIC and Tx-traffic interrupts for every configured fifo
 * sharing this vector.  Returns IRQ_HANDLED when the interrupt was
 * ours (or the status reads as all 1's, typically a sign the adapter
 * has stopped responding), IRQ_NONE otherwise.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
    int i;
    struct fifo_info *fifos = (struct fifo_info *)dev_id;
    struct s2io_nic *sp = fifos->nic;
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    struct config_param *config  = &sp->config;
    u64 reason;

    if (unlikely(!is_s2io_card_up(sp)))
        return IRQ_NONE;

    reason = readq(&bar0->general_int_status);
    if (unlikely(reason == S2IO_MINUS_ONE))
        /* Nothing much can be done. Get out */
        return IRQ_HANDLED;

    if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
        /* Mask all interrupts while the Tx events are serviced */
        writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

        if (reason & GEN_INTR_TXPIC)
            s2io_txpic_intr_handle(sp);

        if (reason & GEN_INTR_TXTRAFFIC)
            /* R1 register: writing all 1's clears the cause bits */
            writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

        for (i = 0; i < config->tx_fifo_num; i++)
            tx_intr_handler(&fifos[i]);

        /* Restore the saved mask and flush the posted write */
        writeq(sp->general_int_mask, &bar0->general_int_mask);
        readl(&bar0->general_int_status);
        return IRQ_HANDLED;
    }
    /* The interrupt was not raised by us */
    return IRQ_NONE;
}
4267 
/*
 * s2io_txpic_intr_handle - handle Tx-PIC (GPIO) interrupts.
 * @sp: private member of the device structure.
 *
 * Decodes GPIO link-state interrupts and updates the driver link
 * state, adapter enable bit and LED accordingly.  Only one of
 * link-up / link-down is left unmasked at a time so the next
 * transition generates a fresh interrupt.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    u64 val64;

    val64 = readq(&bar0->pic_int_status);
    if (val64 & PIC_INT_GPIO) {
        val64 = readq(&bar0->gpio_int_reg);
        if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
            (val64 & GPIO_INT_REG_LINK_UP)) {
            /*
             * This is unstable state so clear both up/down
             * interrupt and adapter to re-evaluate the link state.
             */
            val64 |= GPIO_INT_REG_LINK_DOWN;
            val64 |= GPIO_INT_REG_LINK_UP;
            writeq(val64, &bar0->gpio_int_reg);
            val64 = readq(&bar0->gpio_int_mask);
            /* unmask both so the next transition interrupts us */
            val64 &= ~(GPIO_INT_MASK_LINK_UP |
                   GPIO_INT_MASK_LINK_DOWN);
            writeq(val64, &bar0->gpio_int_mask);
        } else if (val64 & GPIO_INT_REG_LINK_UP) {
            /* NOTE(review): value unused — presumably read to
             * latch/flush adapter state; confirm against HW spec.
             */
            val64 = readq(&bar0->adapter_status);
            /* Enable Adapter */
            val64 = readq(&bar0->adapter_control);
            val64 |= ADAPTER_CNTL_EN;
            writeq(val64, &bar0->adapter_control);
            val64 |= ADAPTER_LED_ON;
            writeq(val64, &bar0->adapter_control);
            if (!sp->device_enabled_once)
                sp->device_enabled_once = 1;

            s2io_link(sp, LINK_UP);
            /*
             * unmask link down interrupt and mask link-up
             * intr
             */
            val64 = readq(&bar0->gpio_int_mask);
            val64 &= ~GPIO_INT_MASK_LINK_DOWN;
            val64 |= GPIO_INT_MASK_LINK_UP;
            writeq(val64, &bar0->gpio_int_mask);

        } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
            /* NOTE(review): value unused — presumably a flush read */
            val64 = readq(&bar0->adapter_status);
            s2io_link(sp, LINK_DOWN);
            /* Link is down so unmask link up interrupt */
            val64 = readq(&bar0->gpio_int_mask);
            val64 &= ~GPIO_INT_MASK_LINK_UP;
            val64 |= GPIO_INT_MASK_LINK_DOWN;
            writeq(val64, &bar0->gpio_int_mask);

            /* turn off LED */
            val64 = readq(&bar0->adapter_control);
            val64 = val64 & (~ADAPTER_LED_ON);
            writeq(val64, &bar0->adapter_control);
        }
    }
    /* final read of the mask register; result discarded — presumably
     * flushes the preceding posted writes (TODO confirm)
     */
    val64 = readq(&bar0->gpio_int_mask);
}
4327 
4328 /**
4329  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4330  *  @value: alarm bits
4331  *  @addr: address value
4332  *  @cnt: counter variable
4333  *  Description: Check for alarm and increment the counter
4334  *  Return Value:
4335  *  1 - if alarm bit set
4336  *  0 - if alarm bit is not set
4337  */
4338 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4339                  unsigned long long *cnt)
4340 {
4341     u64 val64;
4342     val64 = readq(addr);
4343     if (val64 & value) {
4344         writeq(val64, addr);
4345         (*cnt)++;
4346         return 1;
4347     }
4348     return 0;
4349 
4350 }
4351 
4352 /**
4353  *  s2io_handle_errors - Xframe error indication handler
4354  *  @dev_id: opaque handle to dev
4355  *  Description: Handle alarms such as loss of link, single or
4356  *  double ECC errors, critical and serious errors.
4357  *  Return Value:
4358  *  NONE
4359  */
4360 static void s2io_handle_errors(void *dev_id)
4361 {
4362     struct net_device *dev = (struct net_device *)dev_id;
4363     struct s2io_nic *sp = netdev_priv(dev);
4364     struct XENA_dev_config __iomem *bar0 = sp->bar0;
4365     u64 temp64 = 0, val64 = 0;
4366     int i = 0;
4367 
4368     struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4369     struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4370 
4371     if (!is_s2io_card_up(sp))
4372         return;
4373 
4374     if (pci_channel_offline(sp->pdev))
4375         return;
4376 
4377     memset(&sw_stat->ring_full_cnt, 0,
4378            sizeof(sw_stat->ring_full_cnt));
4379 
4380     /* Handling the XPAK counters update */
4381     if (stats->xpak_timer_count < 72000) {
4382         /* waiting for an hour */
4383         stats->xpak_timer_count++;
4384     } else {
4385         s2io_updt_xpak_counter(dev);
4386         /* reset the count to zero */
4387         stats->xpak_timer_count = 0;
4388     }
4389 
4390     /* Handling link status change error Intr */
4391     if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4392         val64 = readq(&bar0->mac_rmac_err_reg);
4393         writeq(val64, &bar0->mac_rmac_err_reg);
4394         if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4395             schedule_work(&sp->set_link_task);
4396     }
4397 
4398     /* In case of a serious error, the device will be Reset. */
4399     if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4400                   &sw_stat->serious_err_cnt))
4401         goto reset;
4402 
4403     /* Check for data parity error */
4404     if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4405                   &sw_stat->parity_err_cnt))
4406         goto reset;
4407 
4408     /* Check for ring full counter */
4409     if (sp->device_type == XFRAME_II_DEVICE) {
4410         val64 = readq(&bar0->ring_bump_counter1);
4411         for (i = 0; i < 4; i++) {
4412             temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4413             temp64 >>= 64 - ((i+1)*16);
4414             sw_stat->ring_full_cnt[i] += temp64;
4415         }
4416 
4417         val64 = readq(&bar0->ring_bump_counter2);
4418         for (i = 0; i < 4; i++) {
4419             temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4420             temp64 >>= 64 - ((i+1)*16);
4421             sw_stat->ring_full_cnt[i+4] += temp64;
4422         }
4423     }
4424 
4425     val64 = readq(&bar0->txdma_int_status);
4426     /*check for pfc_err*/
4427     if (val64 & TXDMA_PFC_INT) {
4428         if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4429                       PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4430                       PFC_PCIX_ERR,
4431                       &bar0->pfc_err_reg,
4432                       &sw_stat->pfc_err_cnt))
4433             goto reset;
4434         do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4435                       &bar0->pfc_err_reg,
4436                       &sw_stat->pfc_err_cnt);
4437     }
4438 
4439     /*check for tda_err*/
4440     if (val64 & TXDMA_TDA_INT) {
4441         if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4442                       TDA_SM0_ERR_ALARM |
4443                       TDA_SM1_ERR_ALARM,
4444                       &bar0->tda_err_reg,
4445                       &sw_stat->tda_err_cnt))
4446             goto reset;
4447         do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4448                       &bar0->tda_err_reg,
4449                       &sw_stat->tda_err_cnt);
4450     }
4451     /*check for pcc_err*/
4452     if (val64 & TXDMA_PCC_INT) {
4453         if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4454                       PCC_N_SERR | PCC_6_COF_OV_ERR |
4455                       PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4456                       PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4457                       PCC_TXB_ECC_DB_ERR,
4458                       &bar0->pcc_err_reg,
4459                       &sw_stat->pcc_err_cnt))
4460             goto reset;
4461         do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4462                       &bar0->pcc_err_reg,
4463                       &sw_stat->pcc_err_cnt);
4464     }
4465 
4466     /*check for tti_err*/
4467     if (val64 & TXDMA_TTI_INT) {
4468         if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4469                       &bar0->tti_err_reg,
4470                       &sw_stat->tti_err_cnt))
4471             goto reset;
4472         do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4473                       &bar0->tti_err_reg,
4474                       &sw_stat->tti_err_cnt);
4475     }
4476 
4477     /*check for lso_err*/
4478     if (val64 & TXDMA_LSO_INT) {
4479         if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4480                       LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4481                       &bar0->lso_err_reg,
4482                       &sw_stat->lso_err_cnt))
4483             goto reset;
4484         do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4485                       &bar0->lso_err_reg,
4486                       &sw_stat->lso_err_cnt);
4487     }
4488 
4489     /*check for tpa_err*/
4490     if (val64 & TXDMA_TPA_INT) {
4491         if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4492                       &bar0->tpa_err_reg,
4493                       &sw_stat->tpa_err_cnt))
4494             goto reset;
4495         do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4496                       &bar0->tpa_err_reg,
4497                       &sw_stat->tpa_err_cnt);
4498     }
4499 
4500     /*check for sm_err*/
4501     if (val64 & TXDMA_SM_INT) {
4502         if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4503                       &bar0->sm_err_reg,
4504                       &sw_stat->sm_err_cnt))
4505             goto reset;
4506     }
4507 
4508     val64 = readq(&bar0->mac_int_status);
4509     if (val64 & MAC_INT_STATUS_TMAC_INT) {
4510         if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4511                       &bar0->mac_tmac_err_reg,
4512                       &sw_stat->mac_tmac_err_cnt))
4513             goto reset;
4514         do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4515                       TMAC_DESC_ECC_SG_ERR |
4516                       TMAC_DESC_ECC_DB_ERR,
4517                       &bar0->mac_tmac_err_reg,
4518                       &sw_stat->mac_tmac_err_cnt);
4519     }
4520 
4521     val64 = readq(&bar0->xgxs_int_status);
4522     if (val64 & XGXS_INT_STATUS_TXGXS) {
4523         if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4524                       &bar0->xgxs_txgxs_err_reg,
4525                       &sw_stat->xgxs_txgxs_err_cnt))
4526             goto reset;
4527         do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4528                       &bar0->xgxs_txgxs_err_reg,
4529                       &sw_stat->xgxs_txgxs_err_cnt);
4530     }
4531 
4532     val64 = readq(&bar0->rxdma_int_status);
4533     if (val64 & RXDMA_INT_RC_INT_M) {
4534         if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4535                       RC_FTC_ECC_DB_ERR |
4536                       RC_PRCn_SM_ERR_ALARM |
4537                       RC_FTC_SM_ERR_ALARM,
4538                       &bar0->rc_err_reg,
4539                       &sw_stat->rc_err_cnt))
4540             goto reset;
4541         do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4542                       RC_FTC_ECC_SG_ERR |
4543                       RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4544                       &sw_stat->rc_err_cnt);
4545         if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4546                       PRC_PCI_AB_WR_Rn |
4547                       PRC_PCI_AB_F_WR_Rn,
4548                       &bar0->prc_pcix_err_reg,
4549                       &sw_stat->prc_pcix_err_cnt))
4550             goto reset;
4551         do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4552                       PRC_PCI_DP_WR_Rn |
4553                       PRC_PCI_DP_F_WR_Rn,
4554                       &bar0->prc_pcix_err_reg,
4555                       &sw_stat->prc_pcix_err_cnt);
4556     }
4557 
4558     if (val64 & RXDMA_INT_RPA_INT_M) {
4559         if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4560                       &bar0->rpa_err_reg,
4561                       &sw_stat->rpa_err_cnt))
4562             goto reset;
4563         do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4564                       &bar0->rpa_err_reg,
4565                       &sw_stat->rpa_err_cnt);
4566     }
4567 
4568     if (val64 & RXDMA_INT_RDA_INT_M) {
4569         if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4570                       RDA_FRM_ECC_DB_N_AERR |
4571                       RDA_SM1_ERR_ALARM |
4572                       RDA_SM0_ERR_ALARM |
4573                       RDA_RXD_ECC_DB_SERR,
4574                       &bar0->rda_err_reg,
4575                       &sw_stat->rda_err_cnt))
4576             goto reset;
4577         do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4578                       RDA_FRM_ECC_SG_ERR |
4579                       RDA_MISC_ERR |
4580                       RDA_PCIX_ERR,
4581                       &bar0->rda_err_reg,
4582                       &sw_stat->rda_err_cnt);
4583     }
4584 
4585     if (val64 & RXDMA_INT_RTI_INT_M) {
4586         if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4587                       &bar0->rti_err_reg,
4588                       &sw_stat->rti_err_cnt))
4589             goto reset;
4590         do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4591                       &bar0->rti_err_reg,
4592                       &sw_stat->rti_err_cnt);
4593     }
4594 
4595     val64 = readq(&bar0->mac_int_status);
4596     if (val64 & MAC_INT_STATUS_RMAC_INT) {
4597         if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4598                       &bar0->mac_rmac_err_reg,
4599                       &sw_stat->mac_rmac_err_cnt))
4600             goto reset;
4601         do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4602                       RMAC_SINGLE_ECC_ERR |
4603                       RMAC_DOUBLE_ECC_ERR,
4604                       &bar0->mac_rmac_err_reg,
4605                       &sw_stat->mac_rmac_err_cnt);
4606     }
4607 
4608     val64 = readq(&bar0->xgxs_int_status);
4609     if (val64 & XGXS_INT_STATUS_RXGXS) {
4610         if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4611                       &bar0->xgxs_rxgxs_err_reg,
4612                       &sw_stat->xgxs_rxgxs_err_cnt))
4613             goto reset;
4614     }
4615 
4616     val64 = readq(&bar0->mc_int_status);
4617     if (val64 & MC_INT_STATUS_MC_INT) {
4618         if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4619                       &bar0->mc_err_reg,
4620                       &sw_stat->mc_err_cnt))
4621             goto reset;
4622 
4623         /* Handling Ecc errors */
4624         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4625             writeq(val64, &bar0->mc_err_reg);
4626             if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4627                 sw_stat->double_ecc_errs++;
4628                 if (sp->device_type != XFRAME_II_DEVICE) {
4629                     /*
4630                      * Reset XframeI only if critical error
4631                      */
4632                     if (val64 &
4633                         (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4634                          MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4635                         goto reset;
4636                 }
4637             } else
4638                 sw_stat->single_ecc_errs++;
4639         }
4640     }
4641     return;
4642 
4643 reset:
4644     s2io_stop_all_tx_queue(sp);
4645     schedule_work(&sp->rst_timer_task);
4646     sw_stat->soft_reset_cnt++;
4647 }
4648 
4649 /**
4650  *  s2io_isr - ISR handler of the device .
4651  *  @irq: the irq of the device.
4652  *  @dev_id: a void pointer to the dev structure of the NIC.
4653  *  Description:  This function is the ISR handler of the device. It
4654  *  identifies the reason for the interrupt and calls the relevant
4655  *  service routines. As a contongency measure, this ISR allocates the
4656  *  recv buffers, if their numbers are below the panic value which is
4657  *  presently set to 25% of the original number of rcv buffers allocated.
4658  *  Return value:
4659  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4660  *   IRQ_NONE: will be returned if interrupt is not from our device
4661  */
4662 static irqreturn_t s2io_isr(int irq, void *dev_id)
4663 {
4664     struct net_device *dev = (struct net_device *)dev_id;
4665     struct s2io_nic *sp = netdev_priv(dev);
4666     struct XENA_dev_config __iomem *bar0 = sp->bar0;
4667     int i;
4668     u64 reason = 0;
4669     struct mac_info *mac_control;
4670     struct config_param *config;
4671 
4672     /* Pretend we handled any irq's from a disconnected card */
4673     if (pci_channel_offline(sp->pdev))
4674         return IRQ_NONE;
4675 
4676     if (!is_s2io_card_up(sp))
4677         return IRQ_NONE;
4678 
4679     config = &sp->config;
4680     mac_control = &sp->mac_control;
4681 
4682     /*
4683      * Identify the cause for interrupt and call the appropriate
4684      * interrupt handler. Causes for the interrupt could be;
4685      * 1. Rx of packet.
4686      * 2. Tx complete.
4687      * 3. Link down.
4688      */
4689     reason = readq(&bar0->general_int_status);
4690 
4691     if (unlikely(reason == S2IO_MINUS_ONE))
4692         return IRQ_HANDLED; /* Nothing much can be done. Get out */
4693 
4694     if (reason &
4695         (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4696         writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4697 
4698         if (config->napi) {
4699             if (reason & GEN_INTR_RXTRAFFIC) {
4700                 napi_schedule(&sp->napi);
4701                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4702                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4703                 readl(&bar0->rx_traffic_int);
4704             }
4705         } else {
4706             /*
4707              * rx_traffic_int reg is an R1 register, writing all 1's
4708              * will ensure that the actual interrupt causing bit
4709              * get's cleared and hence a read can be avoided.
4710              */
4711             if (reason & GEN_INTR_RXTRAFFIC)
4712                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4713 
4714             for (i = 0; i < config->rx_ring_num; i++) {
4715                 struct ring_info *ring = &mac_control->rings[i];
4716 
4717                 rx_intr_handler(ring, 0);
4718             }
4719         }
4720 
4721         /*
4722          * tx_traffic_int reg is an R1 register, writing all 1's
4723          * will ensure that the actual interrupt causing bit get's
4724          * cleared and hence a read can be avoided.
4725          */
4726         if (reason & GEN_INTR_TXTRAFFIC)
4727             writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4728 
4729         for (i = 0; i < config->tx_fifo_num; i++)
4730             tx_intr_handler(&mac_control->fifos[i]);
4731 
4732         if (reason & GEN_INTR_TXPIC)
4733             s2io_txpic_intr_handle(sp);
4734 
4735         /*
4736          * Reallocate the buffers from the interrupt handler itself.
4737          */
4738         if (!config->napi) {
4739             for (i = 0; i < config->rx_ring_num; i++) {
4740                 struct ring_info *ring = &mac_control->rings[i];
4741 
4742                 s2io_chk_rx_buffers(sp, ring);
4743             }
4744         }
4745         writeq(sp->general_int_mask, &bar0->general_int_mask);
4746         readl(&bar0->general_int_status);
4747 
4748         return IRQ_HANDLED;
4749 
4750     } else if (!reason) {
4751         /* The interrupt was not raised by us */
4752         return IRQ_NONE;
4753     }
4754 
4755     return IRQ_HANDLED;
4756 }
4757 
4758 /*
4759  * s2io_updt_stats -
4760  */
4761 static void s2io_updt_stats(struct s2io_nic *sp)
4762 {
4763     struct XENA_dev_config __iomem *bar0 = sp->bar0;
4764     u64 val64;
4765     int cnt = 0;
4766 
4767     if (is_s2io_card_up(sp)) {
4768         /* Apprx 30us on a 133 MHz bus */
4769         val64 = SET_UPDT_CLICKS(10) |
4770             STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4771         writeq(val64, &bar0->stat_cfg);
4772         do {
4773             udelay(100);
4774             val64 = readq(&bar0->stat_cfg);
4775             if (!(val64 & s2BIT(0)))
4776                 break;
4777             cnt++;
4778             if (cnt == 5)
4779                 break; /* Updt failed */
4780         } while (1);
4781     }
4782 }
4783 
4784 /**
4785  *  s2io_get_stats - Updates the device statistics structure.
4786  *  @dev : pointer to the device structure.
4787  *  Description:
4788  *  This function updates the device statistics structure in the s2io_nic
4789  *  structure and returns a pointer to the same.
4790  *  Return value:
4791  *  pointer to the updated net_device_stats structure.
4792  */
4793 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4794 {
4795     struct s2io_nic *sp = netdev_priv(dev);
4796     struct mac_info *mac_control = &sp->mac_control;
4797     struct stat_block *stats = mac_control->stats_info;
4798     u64 delta;
4799 
4800     /* Configure Stats for immediate updt */
4801     s2io_updt_stats(sp);
4802 
4803     /* A device reset will cause the on-adapter statistics to be zero'ed.
4804      * This can be done while running by changing the MTU.  To prevent the
4805      * system from having the stats zero'ed, the driver keeps a copy of the
4806      * last update to the system (which is also zero'ed on reset).  This
4807      * enables the driver to accurately know the delta between the last
4808      * update and the current update.
4809      */
4810     delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4811         le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4812     sp->stats.rx_packets += delta;
4813     dev->stats.rx_packets += delta;
4814 
4815     delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4816         le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4817     sp->stats.tx_packets += delta;
4818     dev->stats.tx_packets += delta;
4819 
4820     delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4821         le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4822     sp->stats.rx_bytes += delta;
4823     dev->stats.rx_bytes += delta;
4824 
4825     delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4826         le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4827     sp->stats.tx_bytes += delta;
4828     dev->stats.tx_bytes += delta;
4829 
4830     delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4831     sp->stats.rx_errors += delta;
4832     dev->stats.rx_errors += delta;
4833 
4834     delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4835         le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4836     sp->stats.tx_errors += delta;
4837     dev->stats.tx_errors += delta;
4838 
4839     delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4840     sp->stats.rx_dropped += delta;
4841     dev->stats.rx_dropped += delta;
4842 
4843     delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4844     sp->stats.tx_dropped += delta;
4845     dev->stats.tx_dropped += delta;
4846 
4847     /* The adapter MAC interprets pause frames as multicast packets, but
4848      * does not pass them up.  This erroneously increases the multicast
4849      * packet count and needs to be deducted when the multicast frame count
4850      * is queried.
4851      */
4852     delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4853         le32_to_cpu(stats->rmac_vld_mcst_frms);
4854     delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4855     delta -= sp->stats.multicast;
4856     sp->stats.multicast += delta;
4857     dev->stats.multicast += delta;
4858 
4859     delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4860         le32_to_cpu(stats->rmac_usized_frms)) +
4861         le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4862     sp->stats.rx_length_errors += delta;
4863     dev->stats.rx_length_errors += delta;
4864 
4865     delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4866     sp->stats.rx_crc_errors += delta;
4867     dev->stats.rx_crc_errors += delta;
4868 
4869     return &dev->stats;
4870 }
4871 
4872 /**
4873  *  s2io_set_multicast - entry point for multicast address enable/disable.
4874  *  @dev : pointer to the device structure
4875  *  @may_sleep: parameter indicates if sleeping when waiting for command
4876  *  complete
4877  *  Description:
4878  *  This function is a driver entry point which gets called by the kernel
4879  *  whenever multicast addresses must be enabled/disabled. This also gets
4880  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4881  *  determine, if multicast address must be enabled or if promiscuous mode
4882  *  is to be disabled etc.
4883  *  Return value:
4884  *  void.
4885  */
4886 static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
4887 {
4888     int i, j, prev_cnt;
4889     struct netdev_hw_addr *ha;
4890     struct s2io_nic *sp = netdev_priv(dev);
4891     struct XENA_dev_config __iomem *bar0 = sp->bar0;
4892     u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4893         0xfeffffffffffULL;
4894     u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4895     void __iomem *add;
4896     struct config_param *config = &sp->config;
4897 
4898     if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4899         /*  Enable all Multicast addresses */
4900         writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4901                &bar0->rmac_addr_data0_mem);
4902         writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4903                &bar0->rmac_addr_data1_mem);
4904         val64 = RMAC_ADDR_CMD_MEM_WE |
4905             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4906             RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4907         writeq(val64, &bar0->rmac_addr_cmd_mem);
4908         /* Wait till command completes */
4909         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4910                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4911                       S2IO_BIT_RESET, may_sleep);
4912 
4913         sp->m_cast_flg = 1;
4914         sp->all_multi_pos = config->max_mc_addr - 1;
4915     } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4916         /*  Disable all Multicast addresses */
4917         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4918                &bar0->rmac_addr_data0_mem);
4919         writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4920                &bar0->rmac_addr_data1_mem);
4921         val64 = RMAC_ADDR_CMD_MEM_WE |
4922             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4923             RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4924         writeq(val64, &bar0->rmac_addr_cmd_mem);
4925         /* Wait till command completes */
4926         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4927                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4928                       S2IO_BIT_RESET, may_sleep);
4929 
4930         sp->m_cast_flg = 0;
4931         sp->all_multi_pos = 0;
4932     }
4933 
4934     if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4935         /*  Put the NIC into promiscuous mode */
4936         add = &bar0->mac_cfg;
4937         val64 = readq(&bar0->mac_cfg);
4938         val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4939 
4940         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4941         writel((u32)val64, add);
4942         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4943         writel((u32) (val64 >> 32), (add + 4));
4944 
4945         if (vlan_tag_strip != 1) {
4946             val64 = readq(&bar0->rx_pa_cfg);
4947             val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4948             writeq(val64, &bar0->rx_pa_cfg);
4949             sp->vlan_strip_flag = 0;
4950         }
4951 
4952         val64 = readq(&bar0->mac_cfg);
4953         sp->promisc_flg = 1;
4954         DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4955               dev->name);
4956     } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4957         /*  Remove the NIC from promiscuous mode */
4958         add = &bar0->mac_cfg;
4959         val64 = readq(&bar0->mac_cfg);
4960         val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4961 
4962         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4963         writel((u32)val64, add);
4964         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4965         writel((u32) (val64 >> 32), (add + 4));
4966 
4967         if (vlan_tag_strip != 0) {
4968             val64 = readq(&bar0->rx_pa_cfg);
4969             val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4970             writeq(val64, &bar0->rx_pa_cfg);
4971             sp->vlan_strip_flag = 1;
4972         }
4973 
4974         val64 = readq(&bar0->mac_cfg);
4975         sp->promisc_flg = 0;
4976         DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4977     }
4978 
4979     /*  Update individual M_CAST address list */
4980     if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4981         if (netdev_mc_count(dev) >
4982             (config->max_mc_addr - config->max_mac_addr)) {
4983             DBG_PRINT(ERR_DBG,
4984                   "%s: No more Rx filters can be added - "
4985                   "please enable ALL_MULTI instead\n",
4986                   dev->name);
4987             return;
4988         }
4989 
4990         prev_cnt = sp->mc_addr_count;
4991         sp->mc_addr_count = netdev_mc_count(dev);
4992 
4993         /* Clear out the previous list of Mc in the H/W. */
4994         for (i = 0; i < prev_cnt; i++) {
4995             writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4996                    &bar0->rmac_addr_data0_mem);
4997             writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4998                    &bar0->rmac_addr_data1_mem);
4999             val64 = RMAC_ADDR_CMD_MEM_WE |
5000                 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5001                 RMAC_ADDR_CMD_MEM_OFFSET
5002                 (config->mc_start_offset + i);
5003             writeq(val64, &bar0->rmac_addr_cmd_mem);
5004 
5005             /* Wait for command completes */
5006             if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5007                           RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5008                           S2IO_BIT_RESET, may_sleep)) {
5009                 DBG_PRINT(ERR_DBG,
5010                       "%s: Adding Multicasts failed\n",
5011                       dev->name);
5012                 return;
5013             }
5014         }
5015 
5016         /* Create the new Rx filter list and update the same in H/W. */
5017         i = 0;
5018         netdev_for_each_mc_addr(ha, dev) {
5019             mac_addr = 0;
5020             for (j = 0; j < ETH_ALEN; j++) {
5021                 mac_addr |= ha->addr[j];
5022                 mac_addr <<= 8;
5023             }
5024             mac_addr >>= 8;
5025             writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5026                    &bar0->rmac_addr_data0_mem);
5027             writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5028                    &bar0->rmac_addr_data1_mem);
5029             val64 = RMAC_ADDR_CMD_MEM_WE |
5030                 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5031                 RMAC_ADDR_CMD_MEM_OFFSET
5032                 (i + config->mc_start_offset);
5033             writeq(val64, &bar0->rmac_addr_cmd_mem);
5034 
5035             /* Wait for command completes */
5036             if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5037                           RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5038                           S2IO_BIT_RESET, may_sleep)) {
5039                 DBG_PRINT(ERR_DBG,
5040                       "%s: Adding Multicasts failed\n",
5041                       dev->name);
5042                 return;
5043             }
5044             i++;
5045         }
5046     }
5047 }
5048 
/* NDO wrapper for s2io_set_multicast.
 * Passes may_sleep = false; NOTE(review): .ndo_set_rx_mode is normally
 * invoked with the netdev address lock held (atomic context) — confirm
 * against the s2io_set_multicast contract.
 */
static void s2io_ndo_set_multicast(struct net_device *dev)
{
	s2io_set_multicast(dev, false);
}
5054 
5055 /* read from CAM unicast & multicast addresses and store it in
5056  * def_mac_addr structure
5057  */
5058 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5059 {
5060     int offset;
5061     u64 mac_addr = 0x0;
5062     struct config_param *config = &sp->config;
5063 
5064     /* store unicast & multicast mac addresses */
5065     for (offset = 0; offset < config->max_mc_addr; offset++) {
5066         mac_addr = do_s2io_read_unicast_mc(sp, offset);
5067         /* if read fails disable the entry */
5068         if (mac_addr == FAILURE)
5069             mac_addr = S2IO_DISABLE_MAC_ENTRY;
5070         do_s2io_copy_mac_addr(sp, offset, mac_addr);
5071     }
5072 }
5073 
5074 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5075 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5076 {
5077     int offset;
5078     struct config_param *config = &sp->config;
5079     /* restore unicast mac address */
5080     for (offset = 0; offset < config->max_mac_addr; offset++)
5081         do_s2io_prog_unicast(sp->dev,
5082                      sp->def_mac_addr[offset].mac_addr);
5083 
5084     /* restore multicast mac address */
5085     for (offset = config->mc_start_offset;
5086          offset < config->max_mc_addr; offset++)
5087         do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5088 }
5089 
5090 /* add a multicast MAC address to CAM */
5091 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5092 {
5093     int i;
5094     u64 mac_addr = 0;
5095     struct config_param *config = &sp->config;
5096 
5097     for (i = 0; i < ETH_ALEN; i++) {
5098         mac_addr <<= 8;
5099         mac_addr |= addr[i];
5100     }
5101     if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5102         return SUCCESS;
5103 
5104     /* check if the multicast mac already preset in CAM */
5105     for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5106         u64 tmp64;
5107         tmp64 = do_s2io_read_unicast_mc(sp, i);
5108         if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5109             break;
5110 
5111         if (tmp64 == mac_addr)
5112             return SUCCESS;
5113     }
5114     if (i == config->max_mc_addr) {
5115         DBG_PRINT(ERR_DBG,
5116               "CAM full no space left for multicast MAC\n");
5117         return FAILURE;
5118     }
5119     /* Update the internal structure with this new mac address */
5120     do_s2io_copy_mac_addr(sp, i, mac_addr);
5121 
5122     return do_s2io_add_mac(sp, mac_addr, i);
5123 }
5124 
5125 /* add MAC address to CAM */
5126 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5127 {
5128     u64 val64;
5129     struct XENA_dev_config __iomem *bar0 = sp->bar0;
5130 
5131     writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5132            &bar0->rmac_addr_data0_mem);
5133 
5134     val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5135         RMAC_ADDR_CMD_MEM_OFFSET(off);
5136     writeq(val64, &bar0->rmac_addr_cmd_mem);
5137 
5138     /* Wait till command completes */
5139     if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5140                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5141                   S2IO_BIT_RESET, true)) {
5142         DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5143         return FAILURE;
5144     }
5145     return SUCCESS;
5146 }
5147 /* deletes a specified unicast/multicast mac entry from CAM */
5148 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5149 {
5150     int offset;
5151     u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5152     struct config_param *config = &sp->config;
5153 
5154     for (offset = 1;
5155          offset < config->max_mc_addr; offset++) {
5156         tmp64 = do_s2io_read_unicast_mc(sp, offset);
5157         if (tmp64 == addr) {
5158             /* disable the entry by writing  0xffffffffffffULL */
5159             if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5160                 return FAILURE;
5161             /* store the new mac list from CAM */
5162             do_s2io_store_unicast_mc(sp);
5163             return SUCCESS;
5164         }
5165     }
5166     DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5167           (unsigned long long)addr);
5168     return FAILURE;
5169 }
5170 
5171 /* read mac entries from CAM */
5172 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5173 {
5174     u64 tmp64, val64;
5175     struct XENA_dev_config __iomem *bar0 = sp->bar0;
5176 
5177     /* read mac addr */
5178     val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5179         RMAC_ADDR_CMD_MEM_OFFSET(offset);
5180     writeq(val64, &bar0->rmac_addr_cmd_mem);
5181 
5182     /* Wait till command completes */
5183     if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5184                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5185                   S2IO_BIT_RESET, true)) {
5186         DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5187         return FAILURE;
5188     }
5189     tmp64 = readq(&bar0->rmac_addr_data0_mem);
5190 
5191     return tmp64 >> 16;
5192 }
5193 
/*
 * s2io_set_mac_addr - driver entry point (.ndo_set_mac_address)
 * @dev: network device
 * @p: struct sockaddr * carrying the new address in sa_data
 *
 * Validates the address, updates the netdev copy, then programs it
 * into the adapter CAM. Returns -EADDRNOTAVAIL for an invalid address,
 * otherwise the SUCCESS/FAILURE result of do_s2io_prog_unicast().
 */

static int s2io_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Reject zero/multicast addresses up front. */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);

	/* store the MAC address in CAM */
	return do_s2io_prog_unicast(dev, dev->dev_addr);
}
5210 /**
5211  *  do_s2io_prog_unicast - Programs the Xframe mac address
5212  *  @dev : pointer to the device structure.
5213  *  @addr: a uchar pointer to the new mac address which is to be set.
5214  *  Description : This procedure will program the Xframe to receive
5215  *  frames with new Mac Address
5216  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5217  *  as defined in errno.h file on failure.
5218  */
5219 
5220 static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
5221 {
5222     struct s2io_nic *sp = netdev_priv(dev);
5223     register u64 mac_addr = 0, perm_addr = 0;
5224     int i;
5225     u64 tmp64;
5226     struct config_param *config = &sp->config;
5227 
5228     /*
5229      * Set the new MAC address as the new unicast filter and reflect this
5230      * change on the device address registered with the OS. It will be
5231      * at offset 0.
5232      */
5233     for (i = 0; i < ETH_ALEN; i++) {
5234         mac_addr <<= 8;
5235         mac_addr |= addr[i];
5236         perm_addr <<= 8;
5237         perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5238     }
5239 
5240     /* check if the dev_addr is different than perm_addr */
5241     if (mac_addr == perm_addr)
5242         return SUCCESS;
5243 
5244     /* check if the mac already preset in CAM */
5245     for (i = 1; i < config->max_mac_addr; i++) {
5246         tmp64 = do_s2io_read_unicast_mc(sp, i);
5247         if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5248             break;
5249 
5250         if (tmp64 == mac_addr) {
5251             DBG_PRINT(INFO_DBG,
5252                   "MAC addr:0x%llx already present in CAM\n",
5253                   (unsigned long long)mac_addr);
5254             return SUCCESS;
5255         }
5256     }
5257     if (i == config->max_mac_addr) {
5258         DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5259         return FAILURE;
5260     }
5261     /* Update the internal structure with this new mac address */
5262     do_s2io_copy_mac_addr(sp, i, mac_addr);
5263 
5264     return do_s2io_add_mac(sp, mac_addr, i);
5265 }
5266 
5267 /**
5268  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5269  * @dev : pointer to netdev
5270  * @cmd: pointer to the structure with parameters given by ethtool to set
5271  * link information.
5272  * Description:
5273  * The function sets different link parameters provided by the user onto
5274  * the NIC.
5275  * Return value:
5276  * 0 on success.
5277  */
5278 
5279 static int
5280 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5281                 const struct ethtool_link_ksettings *cmd)
5282 {
5283     struct s2io_nic *sp = netdev_priv(dev);
5284     if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5285         (cmd->base.speed != SPEED_10000) ||
5286         (cmd->base.duplex != DUPLEX_FULL))
5287         return -EINVAL;
5288     else {
5289         s2io_close(sp->dev);
5290         s2io_open(sp->dev);
5291     }
5292 
5293     return 0;
5294 }
5295 
5296 /**
5297  * s2io_ethtool_get_link_ksettings - Return link specific information.
5298  * @dev: pointer to netdev
5299  * @cmd : pointer to the structure with parameters given by ethtool
5300  * to return link information.
5301  * Description:
5302  * Returns link specific information like speed, duplex etc.. to ethtool.
5303  * Return value :
5304  * return 0 on success.
5305  */
5306 
5307 static int
5308 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5309                 struct ethtool_link_ksettings *cmd)
5310 {
5311     struct s2io_nic *sp = netdev_priv(dev);
5312 
5313     ethtool_link_ksettings_zero_link_mode(cmd, supported);
5314     ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5315     ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5316 
5317     ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5318     ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5319     ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5320 
5321     cmd->base.port = PORT_FIBRE;
5322 
5323     if (netif_carrier_ok(sp->dev)) {
5324         cmd->base.speed = SPEED_10000;
5325         cmd->base.duplex = DUPLEX_FULL;
5326     } else {
5327         cmd->base.speed = SPEED_UNKNOWN;
5328         cmd->base.duplex = DUPLEX_UNKNOWN;
5329     }
5330 
5331     cmd->base.autoneg = AUTONEG_DISABLE;
5332     return 0;
5333 }
5334 
/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @dev: pointer to netdev
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version and PCI bus id
 * (as shown by `ethtool -i`) to ethtool.
 * Return value:
 *  void
 */

static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	struct s2io_nic *sp = netdev_priv(dev);

	/* All three destination buffers are fixed-size; strlcpy truncates
	 * safely and always NUL-terminates.
	 */
	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
}
5355 
5356 /**
5357  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5358  *  @dev: pointer to netdev
5359  *  @regs : pointer to the structure with parameters given by ethtool for
5360  *          dumping the registers.
5361  *  @space: The input argument into which all the registers are dumped.
5362  *  Description:
5363  *  Dumps the entire register space of xFrame NIC into the user given
5364  *  buffer area.
5365  * Return value :
5366  * void .
5367  */
5368 
5369 static void s2io_ethtool_gregs(struct net_device *dev,
5370                    struct ethtool_regs *regs, void *space)
5371 {
5372     int i;
5373     u64 reg;
5374     u8 *reg_space = (u8 *)space;
5375     struct s2io_nic *sp = netdev_priv(dev);
5376 
5377     regs->len = XENA_REG_SPACE;
5378     regs->version = sp->pdev->subsystem_device;
5379 
5380     for (i = 0; i < regs->len; i += 8) {
5381         reg = readq(sp->bar0 + i);
5382         memcpy((reg_space + i), &reg, 8);
5383     }
5384 }
5385 
5386 /*
5387  *  s2io_set_led - control NIC led
5388  */
5389 static void s2io_set_led(struct s2io_nic *sp, bool on)
5390 {
5391     struct XENA_dev_config __iomem *bar0 = sp->bar0;
5392     u16 subid = sp->pdev->subsystem_device;
5393     u64 val64;
5394 
5395     if ((sp->device_type == XFRAME_II_DEVICE) ||
5396         ((subid & 0xFF) >= 0x07)) {
5397         val64 = readq(&bar0->gpio_control);
5398         if (on)
5399             val64 |= GPIO_CTRL_GPIO_0;
5400         else
5401             val64 &= ~GPIO_CTRL_GPIO_0;
5402 
5403         writeq(val64, &bar0->gpio_control);
5404     } else {
5405         val64 = readq(&bar0->adapter_control);
5406         if (on)
5407             val64 |= ADAPTER_LED_ON;
5408         else
5409             val64 &= ~ADAPTER_LED_ON;
5410 
5411         writeq(val64, &bar0->adapter_control);
5412     }
5413 
5414 }
5415 
5416 /**
5417  * s2io_ethtool_set_led - To physically identify the nic on the system.
5418  * @dev : network device
5419  * @state: led setting
5420  *
5421  * Description: Used to physically identify the NIC on the system.
5422  * The Link LED will blink for a time specified by the user for
5423  * identification.
5424  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5425  * identification is possible only if it's link is up.
5426  */
5427 
5428 static int s2io_ethtool_set_led(struct net_device *dev,
5429                 enum ethtool_phys_id_state state)
5430 {
5431     struct s2io_nic *sp = netdev_priv(dev);
5432     struct XENA_dev_config __iomem *bar0 = sp->bar0;
5433     u16 subid = sp->pdev->subsystem_device;
5434 
5435     if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5436         u64 val64 = readq(&bar0->adapter_control);
5437         if (!(val64 & ADAPTER_CNTL_EN)) {
5438             pr_err("Adapter Link down, cannot blink LED\n");
5439             return -EAGAIN;
5440         }
5441     }
5442 
5443     switch (state) {
5444     case ETHTOOL_ID_ACTIVE:
5445         sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5446         return 1;   /* cycle on/off once per second */
5447 
5448     case ETHTOOL_ID_ON:
5449         s2io_set_led(sp, true);
5450         break;
5451 
5452     case ETHTOOL_ID_OFF:
5453         s2io_set_led(sp, false);
5454         break;
5455 
5456     case ETHTOOL_ID_INACTIVE:
5457         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5458             writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5459     }
5460 
5461     return 0;
5462 }
5463 
5464 static void
5465 s2io_ethtool_gringparam(struct net_device *dev,
5466             struct ethtool_ringparam *ering,
5467             struct kernel_ethtool_ringparam *kernel_ering,
5468             struct netlink_ext_ack *extack)
5469 {
5470     struct s2io_nic *sp = netdev_priv(dev);
5471     int i, tx_desc_count = 0, rx_desc_count = 0;
5472 
5473     if (sp->rxd_mode == RXD_MODE_1) {
5474         ering->rx_max_pending = MAX_RX_DESC_1;
5475         ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5476     } else {
5477         ering->rx_max_pending = MAX_RX_DESC_2;
5478         ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5479     }
5480 
5481     ering->tx_max_pending = MAX_TX_DESC;
5482 
5483     for (i = 0; i < sp->config.rx_ring_num; i++)
5484         rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5485     ering->rx_pending = rx_desc_count;
5486     ering->rx_jumbo_pending = rx_desc_count;
5487 
5488     for (i = 0; i < sp->config.tx_fifo_num; i++)
5489         tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5490     ering->tx_pending = tx_desc_count;
5491     DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5492 }
5493 
5494 /**
5495  * s2io_ethtool_getpause_data -Pause frame generation and reception.
5496  * @dev: pointer to netdev
5497  * @ep : pointer to the structure with pause parameters given by ethtool.
5498  * Description:
5499  * Returns the Pause frame generation and reception capability of the NIC.
5500  * Return value:
5501  *  void
5502  */
5503 static void s2io_ethtool_getpause_data(struct net_device *dev,
5504                        struct ethtool_pauseparam *ep)
5505 {
5506     u64 val64;
5507     struct s2io_nic *sp = netdev_priv(dev);
5508     struct XENA_dev_config __iomem *bar0 = sp->bar0;
5509 
5510     val64 = readq(&bar0->rmac_pause_cfg);
5511     if (val64 & RMAC_PAUSE_GEN_ENABLE)
5512         ep->tx_pause = true;
5513     if (val64 & RMAC_PAUSE_RX_ENABLE)
5514         ep->rx_pause = true;
5515     ep->autoneg = false;
5516 }
5517 
5518 /**
5519  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5520  * @dev: pointer to netdev
5521  * @ep : pointer to the structure with pause parameters given by ethtool.
5522  * Description:
5523  * It can be used to set or reset Pause frame generation or reception
5524  * support of the NIC.
5525  * Return value:
5526  * int, returns 0 on Success
5527  */
5528 
5529 static int s2io_ethtool_setpause_data(struct net_device *dev,
5530                       struct ethtool_pauseparam *ep)
5531 {
5532     u64 val64;
5533     struct s2io_nic *sp = netdev_priv(dev);
5534     struct XENA_dev_config __iomem *bar0 = sp->bar0;
5535 
5536     val64 = readq(&bar0->rmac_pause_cfg);
5537     if (ep->tx_pause)
5538         val64 |= RMAC_PAUSE_GEN_ENABLE;
5539     else
5540         val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5541     if (ep->rx_pause)
5542         val64 |= RMAC_PAUSE_RX_ENABLE;
5543     else
5544         val64 &= ~RMAC_PAUSE_RX_ENABLE;
5545     writeq(val64, &bar0->rmac_pause_cfg);
5546     return 0;
5547 }
5548 
5549 #define S2IO_DEV_ID     5
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : Its an output parameter where the data read at the given
 *  offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 *   I2C bus.
 * Return value:
 *  -1 on failure and 0 on success.
 */
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: the EEPROM is reached over the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion: up to 5 tries, 50 ms apart. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: the EEPROM is reached over the SPI controller. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* A second write with SPI_CONTROL_REQ set starts the
		 * transaction.
		 */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE: returns 1 (not -1) on NACK; callers
				 * only test for non-zero.
				 */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5616 
/**
 *  write_eeprom - actually writes the relevant part of the data value.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
 *  @off : offset at which the data must be written
 *  @data : The data that is to be written
 *  @cnt : Number of bytes of the data that are actually to be written into
 *  the Eeprom. (max of 3)
 * Description:
 *  Actually writes the relevant part of the data value into the Eeprom
 *  through the I2C bus.
 * Return value:
 *  0 on success, -1 on failure.
 */

static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write through the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion: up to 5 tries, 50 ms apart. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				/* A NACK means the device rejected the write. */
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write through the SPI controller. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* cnt == 8 encodes a full-width write (byte count field 0). */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* A second write with SPI_CONTROL_REQ set starts the
		 * transaction.
		 */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE: returns 1 (not -1) on NACK; callers
				 * only test for non-zero.
				 */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/* Read the adapter's VPD area via PCI config space and extract the
 * product name and serial number into the nic structure.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	/* The VPD capability lives at a different config offset per chip. */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	/* Fallback if the serial number cannot be parsed below. */
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Read the 256-byte VPD area one dword at a time: write the VPD
	 * address, clear the flag byte, then poll until it reads 0x80
	 * (data ready).
	 */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			/* "SN" keyword, then a length byte and the string. */
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				/* Bound the copy by both destination size
				 * and remaining VPD buffer.
				 */
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* Product name: length byte at vpd_data[1], text from [3]. */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5753 
5754 /**
5755  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5756  *  @dev: pointer to netdev
5757  *  @eeprom : pointer to the user level structure provided by ethtool,
5758  *  containing all relevant information.
5759  *  @data_buf : user defined value to be written into Eeprom.
5760  *  Description: Reads the values stored in the Eeprom at given offset
5761  *  for a given length. Stores these values int the input argument data
5762  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5763  *  Return value:
5764  *  int  0 on success
5765  */
5766 
5767 static int s2io_ethtool_geeprom(struct net_device *dev,
5768                 struct ethtool_eeprom *eeprom, u8 * data_buf)
5769 {
5770     u32 i, valid;
5771     u64 data;
5772     struct s2io_nic *sp = netdev_priv(dev);
5773 
5774     eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5775 
5776     if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5777         eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5778 
5779     for (i = 0; i < eeprom->len; i += 4) {
5780         if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5781             DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5782             return -EFAULT;
5783         }
5784         valid = INV(data);
5785         memcpy((data_buf + i), &valid, 4);
5786     }
5787     return 0;
5788 }
5789 
5790 /**
5791  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5792  *  @dev: pointer to netdev
5793  *  @eeprom : pointer to the user level structure provided by ethtool,
5794  *  containing all relevant information.
5795  *  @data_buf : user defined value to be written into Eeprom.
5796  *  Description:
5797  *  Tries to write the user provided value in the Eeprom, at the offset
5798  *  given by the user.
5799  *  Return value:
5800  *  0 on success, -EFAULT on failure.
5801  */
5802 
5803 static int s2io_ethtool_seeprom(struct net_device *dev,
5804                 struct ethtool_eeprom *eeprom,
5805                 u8 *data_buf)
5806 {
5807     int len = eeprom->len, cnt = 0;
5808     u64 valid = 0, data;
5809     struct s2io_nic *sp = netdev_priv(dev);
5810 
5811     if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5812         DBG_PRINT(ERR_DBG,
5813               "ETHTOOL_WRITE_EEPROM Err: "
5814               "Magic value is wrong, it is 0x%x should be 0x%x\n",
5815               (sp->pdev->vendor | (sp->pdev->device << 16)),
5816               eeprom->magic);
5817         return -EFAULT;
5818     }
5819 
5820     while (len) {
5821         data = (u32)data_buf[cnt] & 0x000000FF;
5822         if (data)
5823             valid = (u32)(data << 24);
5824         else
5825             valid = data;
5826 
5827         if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5828             DBG_PRINT(ERR_DBG,
5829                   "ETHTOOL_WRITE_EEPROM Err: "
5830                   "Cannot write into the specified offset\n");
5831             return -EFAULT;
5832         }
5833         cnt++;
5834         len--;
5835     }
5836 
5837     return 0;
5838 }
5839 
5840 /**
5841  * s2io_register_test - reads and writes into all clock domains.
5842  * @sp : private member of the device structure, which is a pointer to the
5843  * s2io_nic structure.
5844  * @data : variable that returns the result of each of the test conducted b
5845  * by the driver.
5846  * Description:
5847  * Read and write into all clock domains. The NIC has 3 clock domains,
5848  * see that registers in all the three regions are accessible.
5849  * Return value:
5850  * 0 on success.
5851  */
5852 
5853 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5854 {
5855     struct XENA_dev_config __iomem *bar0 = sp->bar0;
5856     u64 val64 = 0, exp_val;
5857     int fail = 0;
5858 
5859     val64 = readq(&bar0->pif_rd_swapper_fb);
5860     if (val64 != 0x123456789abcdefULL) {
5861         fail = 1;
5862         DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5863     }
5864 
5865     val64 = readq(&bar0->rmac_pause_cfg);
5866     if (val64 != 0xc000ffff00000000ULL) {
5867         fail = 1;
5868         DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5869     }
5870 
5871     val64 = readq(&bar0->rx_queue_cfg);
5872     if (sp->device_type == XFRAME_II_DEVICE)
5873         exp_val = 0x0404040404040404ULL;
5874     else
5875         exp_val = 0x0808080808080808ULL;
5876     if (val64 != exp_val) {
5877         fail = 1;
5878         DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5879     }
5880 
5881     val64 = readq(&bar0->xgxs_efifo_cfg);
5882     if (val64 != 0x000000001923141EULL) {
5883         fail = 1;
5884         DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5885     }
5886 
5887     val64 = 0x5A5A5A5A5A5A5A5AULL;
5888     writeq(val64, &bar0->xmsi_data);
5889     val64 = readq(&bar0->xmsi_data);
5890     if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5891         fail = 1;
5892         DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5893     }
5894 
5895     val64 = 0xA5A5A5A5A5A5A5A5ULL;
5896     writeq(val64, &bar0->xmsi_data);
5897     val64 = readq(&bar0->xmsi_data);
5898     if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5899         fail = 1;
5900         DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5901     }
5902 
5903     *data = fail;
5904     return fail;
5905 }
5906 
5907 /**
 * s2io_eeprom_test - to verify that EEPROM in the xena can be programmed.
5909  * @sp : private member of the device structure, which is a pointer to the
5910  * s2io_nic structure.
5911  * @data:variable that returns the result of each of the test conducted by
5912  * the driver.
5913  * Description:
5914  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5915  * register.
5916  * Return value:
5917  * 0 on success.
5918  */
5919 
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
    int fail = 0;
    u64 ret_data, org_4F0, org_7F0;
    u8 saved_4F0 = 0, saved_7F0 = 0;
    struct net_device *dev = sp->dev;

    /* write_eeprom()/read_eeprom() appear to return 0 on success
     * (failure is the *expected* outcome for the negative tests below
     * on write-protected regions) — confirm against their definitions.
     */

    /* Negative test: a write at offset 0 must be rejected. */
    /* Note that SPI interface allows write access to all areas
     * of EEPROM. Hence doing all negative testing only for Xframe I.
     */
    if (sp->device_type == XFRAME_I_DEVICE)
        if (!write_eeprom(sp, 0, 0, 3))
            fail = 1;

    /* Save current values at offsets 0x4F0 and 0x7F0 so they can be
     * restored after the destructive write tests below. */
    if (!read_eeprom(sp, 0x4F0, &org_4F0))
        saved_4F0 = 1;
    if (!read_eeprom(sp, 0x7F0, &org_7F0))
        saved_7F0 = 1;

    /* Positive test: write/read-back at writable offset 0x4F0. */
    if (write_eeprom(sp, 0x4F0, 0x012345, 3))
        fail = 1;
    if (read_eeprom(sp, 0x4F0, &ret_data))
        fail = 1;

    if (ret_data != 0x012345) {
        DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
              "Data written %llx Data read %llx\n",
              dev->name, (unsigned long long)0x12345,
              (unsigned long long)ret_data);
        fail = 1;
    }

    /* Return the EEPROM word to the erased (all-ones) state. */
    write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

    /* Negative test: a write at offset 0x7C must be rejected. */
    if (sp->device_type == XFRAME_I_DEVICE)
        if (!write_eeprom(sp, 0x07C, 0, 3))
            fail = 1;

    /* Positive test: write/read-back at writable offset 0x7F0. */
    if (write_eeprom(sp, 0x7F0, 0x012345, 3))
        fail = 1;
    if (read_eeprom(sp, 0x7F0, &ret_data))
        fail = 1;

    if (ret_data != 0x012345) {
        DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
              "Data written %llx Data read %llx\n",
              dev->name, (unsigned long long)0x12345,
              (unsigned long long)ret_data);
        fail = 1;
    }

    /* Return the EEPROM word to the erased (all-ones) state. */
    write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

    if (sp->device_type == XFRAME_I_DEVICE) {
        /* Further negative tests at protected offsets (Xframe I only). */
        /* Test Write Error at offset 0x80 */
        if (!write_eeprom(sp, 0x080, 0, 3))
            fail = 1;

        /* Test Write Error at offset 0xfc */
        if (!write_eeprom(sp, 0x0FC, 0, 3))
            fail = 1;

        /* Test Write Error at offset 0x100 */
        if (!write_eeprom(sp, 0x100, 0, 3))
            fail = 1;

        /* Test Write Error at offset 4ec */
        if (!write_eeprom(sp, 0x4EC, 0, 3))
            fail = 1;
    }

    /* Restore the values saved at offsets 0x4F0 and 0x7F0 (only if the
     * initial reads succeeded). */
    if (saved_4F0)
        write_eeprom(sp, 0x4F0, org_4F0, 3);
    if (saved_7F0)
        write_eeprom(sp, 0x7F0, org_7F0, 3);

    /* 0 on success, 1 on any failure; also reported through *data. */
    *data = fail;
    return fail;
}
6007 
6008 /**
6009  * s2io_bist_test - invokes the MemBist test of the card .
6010  * @sp : private member of the device structure, which is a pointer to the
6011  * s2io_nic structure.
6012  * @data:variable that returns the result of each of the test conducted by
6013  * the driver.
6014  * Description:
6015  * This invokes the MemBist test of the card. We give around
6016  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6018  * Return value:
6019  * 0 on success and -1 on failure.
6020  */
6021 
6022 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6023 {
6024     u8 bist = 0;
6025     int cnt = 0, ret = -1;
6026 
6027     pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6028     bist |= PCI_BIST_START;
6029     pci_write_config_word(sp->pdev, PCI_BIST, bist);
6030 
6031     while (cnt < 20) {
6032         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6033         if (!(bist & PCI_BIST_START)) {
6034             *data = (bist & PCI_BIST_CODE_MASK);
6035             ret = 0;
6036             break;
6037         }
6038         msleep(100);
6039         cnt++;
6040     }
6041 
6042     return ret;
6043 }
6044 
6045 /**
6046  * s2io_link_test - verifies the link state of the nic
6047  * @sp: private member of the device structure, which is a pointer to the
6048  * s2io_nic structure.
6049  * @data: variable that returns the result of each of the test conducted by
6050  * the driver.
6051  * Description:
6052  * The function verifies the link state of the NIC and updates the input
6053  * argument 'data' appropriately.
6054  * Return value:
6055  * 0 on success.
6056  */
6057 
6058 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6059 {
6060     struct XENA_dev_config __iomem *bar0 = sp->bar0;
6061     u64 val64;
6062 
6063     val64 = readq(&bar0->adapter_status);
6064     if (!(LINK_IS_UP(val64)))
6065         *data = 1;
6066     else
6067         *data = 0;
6068 
6069     return *data;
6070 }
6071 
6072 /**
6073  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6074  * @sp: private member of the device structure, which is a pointer to the
6075  * s2io_nic structure.
6076  * @data: variable that returns the result of each of the test
6077  * conducted by the driver.
6078  * Description:
6079  *  This is one of the offline test that tests the read and write
6080  *  access to the RldRam chip on the NIC.
6081  * Return value:
6082  *  0 on success.
6083  */
6084 
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    u64 val64;
    int cnt, iteration = 0, test_fail = 0;

    /* Disable ECC while exercising the RLDRAM so injected patterns do
     * not raise ECC errors. */
    val64 = readq(&bar0->adapter_control);
    val64 &= ~ADAPTER_ECC_EN;
    writeq(val64, &bar0->adapter_control);

    /* Put the memory controller into RLDRAM test mode. */
    val64 = readq(&bar0->mc_rldram_test_ctrl);
    val64 |= MC_RLDRAM_TEST_MODE;
    SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

    /* Program the RLDRAM mode register set (queue size, then MRS). */
    val64 = readq(&bar0->mc_rldram_mrs);
    val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
    SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

    val64 |= MC_RLDRAM_MRS_ENABLE;
    SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

    /* Two passes: the second pass inverts the upper 48 bits of every
     * test pattern to toggle each data line both ways. */
    while (iteration < 2) {
        val64 = 0x55555555aaaa0000ULL;
        if (iteration == 1)
            val64 ^= 0xFFFFFFFFFFFF0000ULL;
        writeq(val64, &bar0->mc_rldram_test_d0);

        val64 = 0xaaaa5a5555550000ULL;
        if (iteration == 1)
            val64 ^= 0xFFFFFFFFFFFF0000ULL;
        writeq(val64, &bar0->mc_rldram_test_d1);

        val64 = 0x55aaaaaaaa5a0000ULL;
        if (iteration == 1)
            val64 ^= 0xFFFFFFFFFFFF0000ULL;
        writeq(val64, &bar0->mc_rldram_test_d2);

        /* Target address/range for the test. */
        val64 = (u64) (0x0000003ffffe0100ULL);
        writeq(val64, &bar0->mc_rldram_test_add);

        /* Kick off the write phase and poll for completion
         * (up to 5 * 200 ms). */
        val64 = MC_RLDRAM_TEST_MODE |
            MC_RLDRAM_TEST_WRITE |
            MC_RLDRAM_TEST_GO;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        for (cnt = 0; cnt < 5; cnt++) {
            val64 = readq(&bar0->mc_rldram_test_ctrl);
            if (val64 & MC_RLDRAM_TEST_DONE)
                break;
            msleep(200);
        }

        /* Timed out: give up (test_fail stays as-is). */
        if (cnt == 5)
            break;

        /* Kick off the read/verify phase and poll for completion
         * (up to 5 * 500 ms). */
        val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        for (cnt = 0; cnt < 5; cnt++) {
            val64 = readq(&bar0->mc_rldram_test_ctrl);
            if (val64 & MC_RLDRAM_TEST_DONE)
                break;
            msleep(500);
        }

        if (cnt == 5)
            break;

        /* Hardware compares what it read back against the patterns;
         * PASS bit clear means a mismatch. */
        val64 = readq(&bar0->mc_rldram_test_ctrl);
        if (!(val64 & MC_RLDRAM_TEST_PASS))
            test_fail = 1;

        iteration++;
    }

    *data = test_fail;

    /* Bring the adapter out of test mode */
    SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

    /* 0 on success, 1 on failure (also reported through *data). */
    return test_fail;
}
6167 
6168 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6170  *  @dev: pointer to netdev
6171  *  @ethtest : pointer to a ethtool command specific structure that will be
6172  *  returned to the user.
6173  *  @data : variable that returns the result of each of the test
6174  * conducted by the driver.
6175  * Description:
6176  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6177  *  the health of the card.
6178  * Return value:
6179  *  void
6180  */
6181 
6182 static void s2io_ethtool_test(struct net_device *dev,
6183                   struct ethtool_test *ethtest,
6184                   uint64_t *data)
6185 {
6186     struct s2io_nic *sp = netdev_priv(dev);
6187     int orig_state = netif_running(sp->dev);
6188 
6189     if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6190         /* Offline Tests. */
6191         if (orig_state)
6192             s2io_close(sp->dev);
6193 
6194         if (s2io_register_test(sp, &data[0]))
6195             ethtest->flags |= ETH_TEST_FL_FAILED;
6196 
6197         s2io_reset(sp);
6198 
6199         if (s2io_rldram_test(sp, &data[3]))
6200             ethtest->flags |= ETH_TEST_FL_FAILED;
6201 
6202         s2io_reset(sp);
6203 
6204         if (s2io_eeprom_test(sp, &data[1]))
6205             ethtest->flags |= ETH_TEST_FL_FAILED;
6206 
6207         if (s2io_bist_test(sp, &data[4]))
6208             ethtest->flags |= ETH_TEST_FL_FAILED;
6209 
6210         if (orig_state)
6211             s2io_open(sp->dev);
6212 
6213         data[2] = 0;
6214     } else {
6215         /* Online Tests. */
6216         if (!orig_state) {
6217             DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6218                   dev->name);
6219             data[0] = -1;
6220             data[1] = -1;
6221             data[2] = -1;
6222             data[3] = -1;
6223             data[4] = -1;
6224         }
6225 
6226         if (s2io_link_test(sp, &data[2]))
6227             ethtest->flags |= ETH_TEST_FL_FAILED;
6228 
6229         data[0] = 0;
6230         data[1] = 0;
6231         data[3] = 0;
6232         data[4] = 0;
6233     }
6234 }
6235 
static void s2io_get_ethtool_stats(struct net_device *dev,
                   struct ethtool_stats *estats,
                   u64 *tmp_stats)
{
    int i = 0, k;
    struct s2io_nic *sp = netdev_priv(dev);
    struct stat_block *stats = sp->mac_control.stats_info;
    struct swStat *swstats = &stats->sw_stat;
    struct xpakStat *xstats = &stats->xpak_stat;

    /* Refresh the DMA'd hardware statistics block, then copy the
     * counters out positionally: the order below must match the
     * ethtool stat-string tables exactly. */
    s2io_updt_stats(sp);

    /* Tx MAC (TMAC) counters.  Many hardware counters are split into a
     * 32-bit value plus a 32-bit overflow word; recombine to 64 bits. */
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
        le32_to_cpu(stats->tmac_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
        le32_to_cpu(stats->tmac_data_octets);
    tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
        le32_to_cpu(stats->tmac_mcst_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
        le32_to_cpu(stats->tmac_bcst_frms);
    tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
        le32_to_cpu(stats->tmac_ttl_octets);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
        le32_to_cpu(stats->tmac_ucst_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
        le32_to_cpu(stats->tmac_nucst_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
        le32_to_cpu(stats->tmac_any_err_frms);
    tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
    tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
        le32_to_cpu(stats->tmac_vld_ip);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
        le32_to_cpu(stats->tmac_drop_ip);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
        le32_to_cpu(stats->tmac_icmp);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
        le32_to_cpu(stats->tmac_rst_tcp);
    tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
    tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
        le32_to_cpu(stats->tmac_udp);

    /* Rx MAC (RMAC) counters. */
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
        le32_to_cpu(stats->rmac_vld_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
        le32_to_cpu(stats->rmac_data_octets);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
        le32_to_cpu(stats->rmac_vld_mcst_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
        le32_to_cpu(stats->rmac_vld_bcst_frms);
    tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
    tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
        le32_to_cpu(stats->rmac_ttl_octets);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
        | le32_to_cpu(stats->rmac_accepted_ucst_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
        << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
        le32_to_cpu(stats->rmac_discarded_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
        << 32 | le32_to_cpu(stats->rmac_drop_events);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
        le32_to_cpu(stats->rmac_usized_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
        le32_to_cpu(stats->rmac_osized_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
        le32_to_cpu(stats->rmac_frag_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
        le32_to_cpu(stats->rmac_jabber_frms);
    /* Frame-size histogram buckets. */
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
        le32_to_cpu(stats->rmac_ip);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
    tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
        le32_to_cpu(stats->rmac_drop_ip);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
        le32_to_cpu(stats->rmac_icmp);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
        le32_to_cpu(stats->rmac_udp);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
        le32_to_cpu(stats->rmac_err_drp_udp);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
    /* Per-queue frame and queue-full counters (8 Rx queues). */
    tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
    tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
    tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
    tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
    tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
    tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
    tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
    tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
    tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
        le32_to_cpu(stats->rmac_pause_cnt);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
    tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
    tmp_stats[i++] =
        (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
        le32_to_cpu(stats->rmac_accepted_ip);
    tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);

    /* PCI/PCI-X bus transaction counters. */
    tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
    tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

    /* Enhanced statistics exist only for Hercules */
    if (sp->device_type == XFRAME_II_DEVICE) {
        tmp_stats[i++] =
            le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
        tmp_stats[i++] =
            le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
        tmp_stats[i++] =
            le64_to_cpu(stats->rmac_ttl_8192_max_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
        tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
    }

    /* Software (driver-maintained) statistics. */
    /* NOTE(review): constant-zero placeholder entry — appears to keep
     * alignment with the driver-stats string table; confirm against
     * ethtool_driver_stats_keys. */
    tmp_stats[i++] = 0;
    tmp_stats[i++] = swstats->single_ecc_errs;
    tmp_stats[i++] = swstats->double_ecc_errs;
    tmp_stats[i++] = swstats->parity_err_cnt;
    tmp_stats[i++] = swstats->serious_err_cnt;
    tmp_stats[i++] = swstats->soft_reset_cnt;
    tmp_stats[i++] = swstats->fifo_full_cnt;
    for (k = 0; k < MAX_RX_RINGS; k++)
        tmp_stats[i++] = swstats->ring_full_cnt[k];
    /* XPAK transceiver alarm/warning counters. */
    tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
    tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
    tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
    tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
    tmp_stats[i++] = xstats->alarm_laser_output_power_high;
    tmp_stats[i++] = xstats->alarm_laser_output_power_low;
    tmp_stats[i++] = xstats->warn_transceiver_temp_high;
    tmp_stats[i++] = xstats->warn_transceiver_temp_low;
    tmp_stats[i++] = xstats->warn_laser_bias_current_high;
    tmp_stats[i++] = xstats->warn_laser_bias_current_low;
    tmp_stats[i++] = xstats->warn_laser_output_power_high;
    tmp_stats[i++] = xstats->warn_laser_output_power_low;
    /* LRO (large receive offload) software statistics. */
    tmp_stats[i++] = swstats->clubbed_frms_cnt;
    tmp_stats[i++] = swstats->sending_both;
    tmp_stats[i++] = swstats->outof_sequence_pkts;
    tmp_stats[i++] = swstats->flush_max_pkts;
    if (swstats->num_aggregations) {
        u64 tmp = swstats->sum_avg_pkts_aggregated;
        int count = 0;
        /*
         * Since 64-bit divide does not work on all platforms,
         * do repeated subtraction.
         */
        while (tmp >= swstats->num_aggregations) {
            tmp -= swstats->num_aggregations;
            count++;
        }
        tmp_stats[i++] = count;
    } else
        tmp_stats[i++] = 0;
    tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
    tmp_stats[i++] = swstats->pci_map_fail_cnt;
    tmp_stats[i++] = swstats->watchdog_timer_cnt;
    tmp_stats[i++] = swstats->mem_allocated;
    tmp_stats[i++] = swstats->mem_freed;
    tmp_stats[i++] = swstats->link_up_cnt;
    tmp_stats[i++] = swstats->link_down_cnt;
    tmp_stats[i++] = swstats->link_up_time;
    tmp_stats[i++] = swstats->link_down_time;

    /* Per-cause Tx error counters. */
    tmp_stats[i++] = swstats->tx_buf_abort_cnt;
    tmp_stats[i++] = swstats->tx_desc_abort_cnt;
    tmp_stats[i++] = swstats->tx_parity_err_cnt;
    tmp_stats[i++] = swstats->tx_link_loss_cnt;
    tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

    /* Per-cause Rx and internal-block error counters. */
    tmp_stats[i++] = swstats->rx_parity_err_cnt;
    tmp_stats[i++] = swstats->rx_abort_cnt;
    tmp_stats[i++] = swstats->rx_parity_abort_cnt;
    tmp_stats[i++] = swstats->rx_rda_fail_cnt;
    tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
    tmp_stats[i++] = swstats->rx_fcs_err_cnt;
    tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
    tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
    tmp_stats[i++] = swstats->rx_unkn_err_cnt;
    tmp_stats[i++] = swstats->tda_err_cnt;
    tmp_stats[i++] = swstats->pfc_err_cnt;
    tmp_stats[i++] = swstats->pcc_err_cnt;
    tmp_stats[i++] = swstats->tti_err_cnt;
    tmp_stats[i++] = swstats->tpa_err_cnt;
    tmp_stats[i++] = swstats->sm_err_cnt;
    tmp_stats[i++] = swstats->lso_err_cnt;
    tmp_stats[i++] = swstats->mac_tmac_err_cnt;
    tmp_stats[i++] = swstats->mac_rmac_err_cnt;
    tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
    tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
    tmp_stats[i++] = swstats->rc_err_cnt;
    tmp_stats[i++] = swstats->prc_pcix_err_cnt;
    tmp_stats[i++] = swstats->rpa_err_cnt;
    tmp_stats[i++] = swstats->rda_err_cnt;
    tmp_stats[i++] = swstats->rti_err_cnt;
    tmp_stats[i++] = swstats->mc_err_cnt;
}
6512 
/* ethtool get_regs_len: size in bytes of the register dump returned by
 * s2io_ethtool_gregs (the whole BAR0 register space). */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
    return XENA_REG_SPACE;
}
6517 
6518 
/* ethtool get_eeprom_len: size in bytes of the on-board EEPROM. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
    return XENA_EEPROM_SPACE;
}
6523 
6524 static int s2io_get_sset_count(struct net_device *dev, int sset)
6525 {
6526     struct s2io_nic *sp = netdev_priv(dev);
6527 
6528     switch (sset) {
6529     case ETH_SS_TEST:
6530         return S2IO_TEST_LEN;
6531     case ETH_SS_STATS:
6532         switch (sp->device_type) {
6533         case XFRAME_I_DEVICE:
6534             return XFRAME_I_STAT_LEN;
6535         case XFRAME_II_DEVICE:
6536             return XFRAME_II_STAT_LEN;
6537         default:
6538             return 0;
6539         }
6540     default:
6541         return -EOPNOTSUPP;
6542     }
6543 }
6544 
6545 static void s2io_ethtool_get_strings(struct net_device *dev,
6546                      u32 stringset, u8 *data)
6547 {
6548     int stat_size = 0;
6549     struct s2io_nic *sp = netdev_priv(dev);
6550 
6551     switch (stringset) {
6552     case ETH_SS_TEST:
6553         memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6554         break;
6555     case ETH_SS_STATS:
6556         stat_size = sizeof(ethtool_xena_stats_keys);
6557         memcpy(data, &ethtool_xena_stats_keys, stat_size);
6558         if (sp->device_type == XFRAME_II_DEVICE) {
6559             memcpy(data + stat_size,
6560                    &ethtool_enhanced_stats_keys,
6561                    sizeof(ethtool_enhanced_stats_keys));
6562             stat_size += sizeof(ethtool_enhanced_stats_keys);
6563         }
6564 
6565         memcpy(data + stat_size, &ethtool_driver_stats_keys,
6566                sizeof(ethtool_driver_stats_keys));
6567     }
6568 }
6569 
6570 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6571 {
6572     struct s2io_nic *sp = netdev_priv(dev);
6573     netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6574 
6575     if (changed && netif_running(dev)) {
6576         int rc;
6577 
6578         s2io_stop_all_tx_queue(sp);
6579         s2io_card_down(sp);
6580         dev->features = features;
6581         rc = s2io_card_up(sp);
6582         if (rc)
6583             s2io_reset(sp);
6584         else
6585             s2io_start_all_tx_queue(sp);
6586 
6587         return rc ? rc : 1;
6588     }
6589 
6590     return 0;
6591 }
6592 
/* ethtool operations supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
    .get_drvinfo = s2io_ethtool_gdrvinfo,
    .get_regs_len = s2io_ethtool_get_regs_len,
    .get_regs = s2io_ethtool_gregs,
    .get_link = ethtool_op_get_link,
    .get_eeprom_len = s2io_get_eeprom_len,
    .get_eeprom = s2io_ethtool_geeprom,
    .set_eeprom = s2io_ethtool_seeprom,
    .get_ringparam = s2io_ethtool_gringparam,
    .get_pauseparam = s2io_ethtool_getpause_data,
    .set_pauseparam = s2io_ethtool_setpause_data,
    .self_test = s2io_ethtool_test,
    .get_strings = s2io_ethtool_get_strings,
    .set_phys_id = s2io_ethtool_set_led,
    .get_ethtool_stats = s2io_get_ethtool_stats,
    .get_sset_count = s2io_get_sset_count,
    .get_link_ksettings = s2io_ethtool_get_link_ksettings,
    .set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
6612 
6613 /**
6614  *  s2io_ioctl - Entry point for the Ioctl
6615  *  @dev :  Device pointer.
6616  *  @rq :  An IOCTL specefic structure, that can contain a pointer to
6617  *  a proprietary structure used to pass information to the driver.
6618  *  @cmd :  This is used to distinguish between the different commands that
6619  *  can be passed to the IOCTL functions.
6620  *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  the function always returns -EOPNOTSUPP.
6623  */
6624 
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    /* No private ioctls are implemented by this driver. */
    return -EOPNOTSUPP;
}
6629 
6630 /**
6631  *  s2io_change_mtu - entry point to change MTU size for the device.
6632  *   @dev : device pointer.
6633  *   @new_mtu : the new MTU size for the device.
6634  *   Description: A driver entry point to change MTU size for the device.
6635  *   Before changing the MTU the device must be stopped.
6636  *  Return value:
6637  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6638  *   file on failure.
6639  */
6640 
6641 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6642 {
6643     struct s2io_nic *sp = netdev_priv(dev);
6644     int ret = 0;
6645 
6646     dev->mtu = new_mtu;
6647     if (netif_running(dev)) {
6648         s2io_stop_all_tx_queue(sp);
6649         s2io_card_down(sp);
6650         ret = s2io_card_up(sp);
6651         if (ret) {
6652             DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6653                   __func__);
6654             return ret;
6655         }
6656         s2io_wake_all_tx_queue(sp);
6657     } else { /* Device is down */
6658         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6659         u64 val64 = new_mtu;
6660 
6661         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6662     }
6663 
6664     return ret;
6665 }
6666 
6667 /**
 * s2io_set_link - Set the Link status
6669  * @work: work struct containing a pointer to device private structure
6670  * Description: Sets the link status for the adapter
6671  */
6672 
static void s2io_set_link(struct work_struct *work)
{
    struct s2io_nic *nic = container_of(work, struct s2io_nic,
                        set_link_task);
    struct net_device *dev = nic->dev;
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    register u64 val64;
    u16 subid;

    /* Runs from a workqueue; rtnl_lock serialises against open/close
     * and other configuration paths. */
    rtnl_lock();

    if (!netif_running(dev))
        goto out_unlock;

    if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
        /* The card is being reset, no point doing anything */
        goto out_unlock;
    }

    subid = nic->pdev->subsystem_device;
    if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
        /*
         * Allow a small delay for the NICs self initiated
         * cleanup to complete.
         */
        msleep(100);
    }

    val64 = readq(&bar0->adapter_status);
    if (LINK_IS_UP(val64)) {
        /* Link came up: enable the adapter (if not already enabled)
         * and turn the link LED on. */
        if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
            if (verify_xena_quiescence(nic)) {
                val64 = readq(&bar0->adapter_control);
                val64 |= ADAPTER_CNTL_EN;
                writeq(val64, &bar0->adapter_control);
                if (CARDS_WITH_FAULTY_LINK_INDICATORS(
                        nic->device_type, subid)) {
                    /* Drive the LED via GPIO on cards whose
                     * link-LED wiring is unreliable; the final
                     * readq presumably flushes the posted
                     * write — confirm. */
                    val64 = readq(&bar0->gpio_control);
                    val64 |= GPIO_CTRL_GPIO_0;
                    writeq(val64, &bar0->gpio_control);
                    val64 = readq(&bar0->gpio_control);
                } else {
                    val64 |= ADAPTER_LED_ON;
                    writeq(val64, &bar0->adapter_control);
                }
                nic->device_enabled_once = true;
            } else {
                DBG_PRINT(ERR_DBG,
                      "%s: Error: device is not Quiescent\n",
                      dev->name);
                s2io_stop_all_tx_queue(nic);
            }
        }
        val64 = readq(&bar0->adapter_control);
        val64 |= ADAPTER_LED_ON;
        writeq(val64, &bar0->adapter_control);
        s2io_link(nic, LINK_UP);
    } else {
        /* Link went down: clear the GPIO-driven LED (faulty-indicator
         * cards) and the adapter LED, then report link-down. */
        if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                              subid)) {
            val64 = readq(&bar0->gpio_control);
            val64 &= ~GPIO_CTRL_GPIO_0;
            writeq(val64, &bar0->gpio_control);
            val64 = readq(&bar0->gpio_control);
        }
        /* turn off LED */
        val64 = readq(&bar0->adapter_control);
        val64 = val64 & (~ADAPTER_LED_ON);
        writeq(val64, &bar0->adapter_control);
        s2io_link(nic, LINK_DOWN);
    }
    clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
    rtnl_unlock();
}
6749 
/*
 * set_rxd_buffer_pointer - (re)attach DMA-mapped buffers to one Rx descriptor.
 * @sp: device private structure
 * @rxdp: descriptor to fill (only touched when its Host_Control is 0)
 * @ba: per-descriptor buffer bookkeeping, used in 2-buffer (3B) mode
 * @skb: in/out skb pointer; a previously allocated skb is reused for
 *       subsequent descriptors whose Host_Control is NULL
 * @temp0: cached DMA address of buffer 0, reused across descriptors
 * @temp1: cached DMA address of buffer 1 (dummy buffer in 3B mode)
 * @temp2: cached DMA address of buffer 2 (payload buffer in 3B mode)
 * @size: skb allocation size
 *
 * Used on the card-down path (see rxd_owner_bit_reset): the frames will never
 * be processed, so the same mapped buffers are shared between descriptors.
 * Returns 0 on success, -ENOMEM on skb allocation or DMA mapping failure
 * (already-mapped buffers are unmapped before returning).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                  struct buffAdd *ba,
                  struct sk_buff **skb, u64 *temp0, u64 *temp1,
                  u64 *temp2, int size)
{
    struct net_device *dev = sp->dev;
    struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

    if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
        struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
        /* allocate skb */
        if (*skb) {
            DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
            /*
             * As Rx frame are not going to be processed,
             * using same mapped address for the Rxd
             * buffer pointer
             */
            rxdp1->Buffer0_ptr = *temp0;
        } else {
            *skb = netdev_alloc_skb(dev, size);
            if (!(*skb)) {
                DBG_PRINT(INFO_DBG,
                      "%s: Out of memory to allocate %s\n",
                      dev->name, "1 buf mode SKBs");
                stats->mem_alloc_fail_cnt++;
                return -ENOMEM ;
            }
            stats->mem_allocated += (*skb)->truesize;
            /* storing the mapped addr in a temp variable
             * such it will be used for next rxd whose
             * Host Control is NULL
             */
            rxdp1->Buffer0_ptr = *temp0 =
                dma_map_single(&sp->pdev->dev, (*skb)->data,
                           size - NET_IP_ALIGN,
                           DMA_FROM_DEVICE);
            if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
                goto memalloc_failed;
            rxdp->Host_Control = (unsigned long) (*skb);
        }
    } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
        struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
        /* Two buffer Mode */
        if (*skb) {
            /* Reuse the previously mapped buffer triple. */
            rxdp3->Buffer2_ptr = *temp2;
            rxdp3->Buffer0_ptr = *temp0;
            rxdp3->Buffer1_ptr = *temp1;
        } else {
            *skb = netdev_alloc_skb(dev, size);
            if (!(*skb)) {
                DBG_PRINT(INFO_DBG,
                      "%s: Out of memory to allocate %s\n",
                      dev->name,
                      "2 buf mode SKBs");
                stats->mem_alloc_fail_cnt++;
                return -ENOMEM;
            }
            stats->mem_allocated += (*skb)->truesize;
            rxdp3->Buffer2_ptr = *temp2 =
                dma_map_single(&sp->pdev->dev, (*skb)->data,
                           dev->mtu + 4, DMA_FROM_DEVICE);
            if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
                goto memalloc_failed;
            rxdp3->Buffer0_ptr = *temp0 =
                dma_map_single(&sp->pdev->dev, ba->ba_0,
                           BUF0_LEN, DMA_FROM_DEVICE);
            if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
                /* Undo the Buffer2 mapping before bailing out. */
                dma_unmap_single(&sp->pdev->dev,
                         (dma_addr_t)rxdp3->Buffer2_ptr,
                         dev->mtu + 4,
                         DMA_FROM_DEVICE);
                goto memalloc_failed;
            }
            rxdp->Host_Control = (unsigned long) (*skb);

            /* Buffer-1 will be dummy buffer not used */
            rxdp3->Buffer1_ptr = *temp1 =
                dma_map_single(&sp->pdev->dev, ba->ba_1,
                           BUF1_LEN, DMA_FROM_DEVICE);
            if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
                /* Undo both earlier mappings before bailing out. */
                dma_unmap_single(&sp->pdev->dev,
                         (dma_addr_t)rxdp3->Buffer0_ptr,
                         BUF0_LEN, DMA_FROM_DEVICE);
                dma_unmap_single(&sp->pdev->dev,
                         (dma_addr_t)rxdp3->Buffer2_ptr,
                         dev->mtu + 4,
                         DMA_FROM_DEVICE);
                goto memalloc_failed;
            }
        }
    }
    return 0;

memalloc_failed:
    stats->pci_map_fail_cnt++;
    stats->mem_freed += (*skb)->truesize;
    dev_kfree_skb(*skb);
    return -ENOMEM;
}
6850 
6851 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6852                 int size)
6853 {
6854     struct net_device *dev = sp->dev;
6855     if (sp->rxd_mode == RXD_MODE_1) {
6856         rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6857     } else if (sp->rxd_mode == RXD_MODE_3B) {
6858         rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6859         rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6860         rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6861     }
6862 }
6863 
6864 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6865 {
6866     int i, j, k, blk_cnt = 0, size;
6867     struct config_param *config = &sp->config;
6868     struct mac_info *mac_control = &sp->mac_control;
6869     struct net_device *dev = sp->dev;
6870     struct RxD_t *rxdp = NULL;
6871     struct sk_buff *skb = NULL;
6872     struct buffAdd *ba = NULL;
6873     u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6874 
6875     /* Calculate the size based on ring mode */
6876     size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6877         HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6878     if (sp->rxd_mode == RXD_MODE_1)
6879         size += NET_IP_ALIGN;
6880     else if (sp->rxd_mode == RXD_MODE_3B)
6881         size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6882 
6883     for (i = 0; i < config->rx_ring_num; i++) {
6884         struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6885         struct ring_info *ring = &mac_control->rings[i];
6886 
6887         blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6888 
6889         for (j = 0; j < blk_cnt; j++) {
6890             for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6891                 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6892                 if (sp->rxd_mode == RXD_MODE_3B)
6893                     ba = &ring->ba[j][k];
6894                 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6895                                &temp0_64,
6896                                &temp1_64,
6897                                &temp2_64,
6898                                size) == -ENOMEM) {
6899                     return 0;
6900                 }
6901 
6902                 set_rxd_buffer_size(sp, rxdp, size);
6903                 dma_wmb();
6904                 /* flip the Ownership bit to Hardware */
6905                 rxdp->Control_1 |= RXD_OWN_XENA;
6906             }
6907         }
6908     }
6909     return 0;
6910 
6911 }
6912 
/*
 * s2io_add_isr - enable MSI-X (if configured) and register interrupt handlers.
 * @sp: device private structure
 *
 * If MSI-X cannot be enabled, or any per-vector request_irq() fails, the
 * driver falls back to legacy INTA.  Returns 0 on success, -1 if even the
 * INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
    int ret = 0;
    struct net_device *dev = sp->dev;
    int err = 0;

    if (sp->config.intr_type == MSI_X)
        ret = s2io_enable_msi_x(sp);
    if (ret) {
        DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
        sp->config.intr_type = INTA;
    }

    /*
     * Store the values of the MSIX table in
     * the struct s2io_nic structure
     */
    store_xmsi_data(sp);

    /* After proper initialization of H/W, register ISR */
    if (sp->config.intr_type == MSI_X) {
        int i, msix_rx_cnt = 0;

        for (i = 0; i < sp->num_entries; i++) {
            if (sp->s2io_entries[i].in_use == MSIX_FLG) {
                if (sp->s2io_entries[i].type ==
                    MSIX_RING_TYPE) {
                    snprintf(sp->desc[i],
                        sizeof(sp->desc[i]),
                        "%s:MSI-X-%d-RX",
                        dev->name, i);
                    err = request_irq(sp->entries[i].vector,
                              s2io_msix_ring_handle,
                              0,
                              sp->desc[i],
                              sp->s2io_entries[i].arg);
                } else if (sp->s2io_entries[i].type ==
                       MSIX_ALARM_TYPE) {
                    snprintf(sp->desc[i],
                        sizeof(sp->desc[i]),
                        "%s:MSI-X-%d-TX",
                        dev->name, i);
                    err = request_irq(sp->entries[i].vector,
                              s2io_msix_fifo_handle,
                              0,
                              sp->desc[i],
                              sp->s2io_entries[i].arg);

                }
                /* if either data or addr is zero print it. */
                if (!(sp->msix_info[i].addr &&
                      sp->msix_info[i].data)) {
                    DBG_PRINT(ERR_DBG,
                          "%s @Addr:0x%llx Data:0x%llx\n",
                          sp->desc[i],
                          (unsigned long long)
                          sp->msix_info[i].addr,
                          (unsigned long long)
                          ntohl(sp->msix_info[i].data));
                } else
                    msix_rx_cnt++;
                if (err) {
                    /* One vector failed: unwind all MSI-X
                     * registrations and fall back to INTA.
                     */
                    remove_msix_isr(sp);

                    DBG_PRINT(ERR_DBG,
                          "%s:MSI-X-%d registration "
                          "failed\n", dev->name, i);

                    DBG_PRINT(ERR_DBG,
                          "%s: Defaulting to INTA\n",
                          dev->name);
                    sp->config.intr_type = INTA;
                    break;
                }
                sp->s2io_entries[i].in_use =
                    MSIX_REGISTERED_SUCCESS;
            }
        }
        if (!err) {
            /* NOTE(review): the pre-decrement presumably excludes
             * the alarm vector counted above — confirm.
             */
            pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
            DBG_PRINT(INFO_DBG,
                  "MSI-X-TX entries enabled through alarm vector\n");
        }
    }
    if (sp->config.intr_type == INTA) {
        err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
                  sp->name, dev);
        if (err) {
            DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
                  dev->name);
            return -1;
        }
    }
    return 0;
}
7008 
7009 static void s2io_rem_isr(struct s2io_nic *sp)
7010 {
7011     if (sp->config.intr_type == MSI_X)
7012         remove_msix_isr(sp);
7013     else
7014         remove_inta_isr(sp);
7015 }
7016 
/*
 * do_s2io_card_down - shut the adapter down.
 * @sp: device private structure
 * @do_io: non-zero to also perform the hardware I/O steps (stop the NIC,
 *         wait for quiescence, reset)
 *
 * Cancels the alarm timer, waits for a running link task, disables napi,
 * removes the ISR, marks the link down, optionally quiesces and resets the
 * hardware, and frees all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
    int cnt = 0;
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    register u64 val64 = 0;
    struct config_param *config;
    config = &sp->config;

    if (!is_s2io_card_up(sp))
        return;

    del_timer_sync(&sp->alarm_timer);
    /* If s2io_set_link task is executing, wait till it completes. */
    while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
        msleep(50);
    clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

    /* Disable napi */
    if (sp->config.napi) {
        int off = 0;
        if (config->intr_type ==  MSI_X) {
            /* One napi context per Rx ring in MSI-X mode. */
            for (; off < sp->config.rx_ring_num; off++)
                napi_disable(&sp->mac_control.rings[off].napi);
        }
        else
            napi_disable(&sp->napi);
    }

    /* disable Tx and Rx traffic on the NIC */
    if (do_io)
        stop_nic(sp);

    s2io_rem_isr(sp);

    /* stop the tx queue, indicate link down */
    s2io_link(sp, LINK_DOWN);

    /* Check if the device is Quiescent and then Reset the NIC */
    while (do_io) {
        /* As per the HW requirement we need to replenish the
         * receive buffer to avoid the ring bump. Since there is
         * no intention of processing the Rx frame at this point we
         * are just setting the ownership bit of rxd in Each Rx
         * ring to HW and set the appropriate buffer size
         * based on the ring mode
         */
        rxd_owner_bit_reset(sp);

        val64 = readq(&bar0->adapter_status);
        if (verify_xena_quiescence(sp)) {
            if (verify_pcc_quiescent(sp, sp->device_enabled_once))
                break;
        }

        /* Retry up to 10 times, 50 ms apart, before giving up. */
        msleep(50);
        cnt++;
        if (cnt == 10) {
            DBG_PRINT(ERR_DBG, "Device not Quiescent - "
                  "adapter status reads 0x%llx\n",
                  (unsigned long long)val64);
            break;
        }
    }
    if (do_io)
        s2io_reset(sp);

    /* Free all Tx buffers */
    free_tx_buffers(sp);

    /* Free all Rx buffers */
    free_rx_buffers(sp);

    clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7091 
/* Bring the card down, including the hardware I/O steps (do_io = 1). */
static void s2io_card_down(struct s2io_nic *sp)
{
    do_s2io_card_down(sp, 1);
}
7096 
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure
 *
 * Initializes the hardware registers, fills the Rx rings, enables napi,
 * restores the receive mode, starts the NIC, registers the ISR, arms the
 * alarm timer and enables interrupts.  Returns 0 on success or a negative
 * errno; on failure the device is reset and Rx buffers are freed.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
    int i, ret = 0;
    struct config_param *config;
    struct mac_info *mac_control;
    struct net_device *dev = sp->dev;
    u16 interruptible;

    /* Initialize the H/W I/O registers */
    ret = init_nic(sp);
    if (ret != 0) {
        DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
              dev->name);
        if (ret != -EIO)
            s2io_reset(sp);
        return ret;
    }

    /*
     * Initializing the Rx buffers. For now we are considering only 1
     * Rx ring and initializing buffers into 30 Rx blocks
     */
    config = &sp->config;
    mac_control = &sp->mac_control;

    for (i = 0; i < config->rx_ring_num; i++) {
        struct ring_info *ring = &mac_control->rings[i];

        ring->mtu = dev->mtu;
        ring->lro = !!(dev->features & NETIF_F_LRO);
        ret = fill_rx_buffers(sp, ring, 1);
        if (ret) {
            DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                  dev->name);
            s2io_reset(sp);
            free_rx_buffers(sp);
            return -ENOMEM;
        }
        DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
              ring->rx_bufs_left);
    }

    /* Initialise napi */
    if (config->napi) {
        if (config->intr_type ==  MSI_X) {
            /* One napi context per Rx ring in MSI-X mode. */
            for (i = 0; i < sp->config.rx_ring_num; i++)
                napi_enable(&sp->mac_control.rings[i].napi);
        } else {
            napi_enable(&sp->napi);
        }
    }

    /* Maintain the state prior to the open */
    if (sp->promisc_flg)
        sp->promisc_flg = 0;
    if (sp->m_cast_flg) {
        sp->m_cast_flg = 0;
        sp->all_multi_pos = 0;
    }

    /* Setting its receive mode */
    s2io_set_multicast(dev, true);

    if (dev->features & NETIF_F_LRO) {
        /* Initialize max aggregatable pkts per session based on MTU */
        sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
        /* Check if we can use (if specified) user provided value */
        if (lro_max_pkts < sp->lro_max_aggr_per_sess)
            sp->lro_max_aggr_per_sess = lro_max_pkts;
    }

    /* Enable Rx Traffic and interrupts on the NIC */
    /* NOTE(review): napi was enabled above but is not disabled on the
     * error paths below — confirm whether this can leave napi enabled
     * after a failed bring-up.
     */
    if (start_nic(sp)) {
        DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
        s2io_reset(sp);
        free_rx_buffers(sp);
        return -ENODEV;
    }

    /* Add interrupt service routine */
    if (s2io_add_isr(sp) != 0) {
        if (sp->config.intr_type == MSI_X)
            s2io_rem_isr(sp);
        s2io_reset(sp);
        free_rx_buffers(sp);
        return -ENODEV;
    }

    /* Fire the alarm handler every half second. */
    timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
    mod_timer(&sp->alarm_timer, jiffies + HZ / 2);

    set_bit(__S2IO_STATE_CARD_UP, &sp->state);

    /*  Enable select interrupts */
    en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
    if (sp->config.intr_type != INTA) {
        interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
        en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
    } else {
        interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
        interruptible |= TX_PIC_INTR;
        en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
    }

    return 0;
}
7203 
7204 /**
7205  * s2io_restart_nic - Resets the NIC.
7206  * @work : work struct containing a pointer to the device private structure
7207  * Description:
7208  * This function is scheduled to be run by the s2io_tx_watchdog
7209  * function after 0.5 secs to reset the NIC. The idea is to reduce
7210  * the run time of the watch dog routine which is run holding a
7211  * spin lock.
7212  */
7213 
7214 static void s2io_restart_nic(struct work_struct *work)
7215 {
7216     struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7217     struct net_device *dev = sp->dev;
7218 
7219     rtnl_lock();
7220 
7221     if (!netif_running(dev))
7222         goto out_unlock;
7223 
7224     s2io_card_down(sp);
7225     if (s2io_card_up(sp)) {
7226         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7227     }
7228     s2io_wake_all_tx_queue(sp);
7229     DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7230 out_unlock:
7231     rtnl_unlock();
7232 }
7233 
7234 /**
7235  *  s2io_tx_watchdog - Watchdog for transmit side.
7236  *  @dev : Pointer to net device structure
7237  *  @txqueue: index of the hanging queue
7238  *  Description:
7239  *  This function is triggered if the Tx Queue is stopped
7240  *  for a pre-defined amount of time when the Interface is still up.
7241  *  If the Interface is jammed in such a situation, the hardware is
7242  *  reset (by s2io_close) and restarted again (by s2io_open) to
7243  *  overcome any problem that might have been caused in the hardware.
7244  *  Return value:
7245  *  void
7246  */
7247 
7248 static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7249 {
7250     struct s2io_nic *sp = netdev_priv(dev);
7251     struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7252 
7253     if (netif_carrier_ok(dev)) {
7254         swstats->watchdog_timer_cnt++;
7255         schedule_work(&sp->rst_timer_task);
7256         swstats->soft_reset_cnt++;
7257     }
7258 }
7259 
7260 /**
7261  *   rx_osm_handler - To perform some OS related operations on SKB.
7262  *   @ring_data : the ring from which this RxD was extracted.
7263  *   @rxdp: descriptor
7264  *   Description:
 *   This function is called by the Rx interrupt service routine to perform
7266  *   some OS related operations on the SKB before passing it to the upper
7267  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7268  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7269  *   to the upper layer. If the checksum is wrong, it increments the Rx
7270  *   packet error count, frees the SKB and returns error.
7271  *   Return value:
7272  *   SUCCESS on success and -1 on failure.
7273  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
    struct s2io_nic *sp = ring_data->nic;
    struct net_device *dev = ring_data->dev;
    struct sk_buff *skb = (struct sk_buff *)
        ((unsigned long)rxdp->Host_Control);
    int ring_no = ring_data->ring_no;
    u16 l3_csum, l4_csum;
    /* Transfer code reported by the hardware in Control_1. */
    unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
    struct lro *lro;
    u8 err_mask;
    struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

    skb->dev = dev;

    if (err) {
        /* Check for parity error */
        if (err & 0x1)
            swstats->parity_err_cnt++;

        /* Bucket the error into the matching sw_stat counter. */
        err_mask = err >> 48;
        switch (err_mask) {
        case 1:
            swstats->rx_parity_err_cnt++;
            break;

        case 2:
            swstats->rx_abort_cnt++;
            break;

        case 3:
            swstats->rx_parity_abort_cnt++;
            break;

        case 4:
            swstats->rx_rda_fail_cnt++;
            break;

        case 5:
            swstats->rx_unkn_prot_cnt++;
            break;

        case 6:
            swstats->rx_fcs_err_cnt++;
            break;

        case 7:
            swstats->rx_buf_size_err_cnt++;
            break;

        case 8:
            swstats->rx_rxd_corrupt_cnt++;
            break;

        case 15:
            swstats->rx_unkn_err_cnt++;
            break;
        }
        /*
         * Drop the packet if bad transfer code. Exception being
         * 0x5, which could be due to unsupported IPv6 extension header.
         * In this case, we let stack handle the packet.
         * Note that in this case, since checksum will be incorrect,
         * stack will validate the same.
         */
        if (err_mask != 0x5) {
            DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
                  dev->name, err_mask);
            dev->stats.rx_crc_errors++;
            swstats->mem_freed
                += skb->truesize;
            dev_kfree_skb(skb);
            ring_data->rx_bufs_left -= 1;
            rxdp->Host_Control = 0;
            return 0;
        }
    }

    rxdp->Host_Control = 0;
    if (sp->rxd_mode == RXD_MODE_1) {
        /* Single-buffer mode: the whole frame is in the skb data. */
        int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

        skb_put(skb, len);
    } else if (sp->rxd_mode == RXD_MODE_3B) {
        /* Two-buffer mode: copy the header from buffer 0 in front of
         * the payload that DMA'd into the skb (buffer 2).
         */
        int get_block = ring_data->rx_curr_get_info.block_index;
        int get_off = ring_data->rx_curr_get_info.offset;
        int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
        int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
        unsigned char *buff = skb_push(skb, buf0_len);

        struct buffAdd *ba = &ring_data->ba[get_block][get_off];
        memcpy(buff, ba->ba_0, buf0_len);
        skb_put(skb, buf2_len);
    }

    /* Only trust the hardware checksum for non-fragmented TCP/UDP
     * frames when RXCSUM is enabled (fragments allowed if LRO is off).
     */
    if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
        ((!ring_data->lro) ||
         (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
        (dev->features & NETIF_F_RXCSUM)) {
        l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
        l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
        if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
            /*
             * NIC verifies if the Checksum of the received
             * frame is Ok or not and accordingly returns
             * a flag in the RxD.
             */
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            if (ring_data->lro) {
                u32 tcp_len = 0;
                u8 *tcp;
                int ret = 0;

                /* LRO state machine: the return code selects how
                 * this frame joins (or flushes) a TCP session.
                 */
                ret = s2io_club_tcp_session(ring_data,
                                skb->data, &tcp,
                                &tcp_len, &lro,
                                rxdp, sp);
                switch (ret) {
                case 3: /* Begin anew */
                    lro->parent = skb;
                    goto aggregate;
                case 1: /* Aggregate */
                    lro_append_pkt(sp, lro, skb, tcp_len);
                    goto aggregate;
                case 4: /* Flush session */
                    lro_append_pkt(sp, lro, skb, tcp_len);
                    queue_rx_frame(lro->parent,
                               lro->vlan_tag);
                    clear_lro_session(lro);
                    swstats->flush_max_pkts++;
                    goto aggregate;
                case 2: /* Flush both */
                    lro->parent->data_len = lro->frags_len;
                    swstats->sending_both++;
                    queue_rx_frame(lro->parent,
                               lro->vlan_tag);
                    clear_lro_session(lro);
                    goto send_up;
                case 0: /* sessions exceeded */
                case -1: /* non-TCP or not L2 aggregatable */
                case 5: /*
                     * First pkt in session not
                     * L3/L4 aggregatable
                     */
                    break;
                default:
                    DBG_PRINT(ERR_DBG,
                          "%s: Samadhana!!\n",
                          __func__);
                    BUG();
                }
            }
        } else {
            /*
             * Packet with erroneous checksum, let the
             * upper layers deal with it.
             */
            skb_checksum_none_assert(skb);
        }
    } else
        skb_checksum_none_assert(skb);

    swstats->mem_freed += skb->truesize;
send_up:
    skb_record_rx_queue(skb, ring_no);
    queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
    sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
    return SUCCESS;
}
7444 
7445 /**
7446  *  s2io_link - stops/starts the Tx queue.
7447  *  @sp : private member of the device structure, which is a pointer to the
7448  *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
7450  *  Description:
7451  *  This function stops/starts the Tx queue depending on whether the link
7452  *  status of the NIC is down or up. This is called by the Alarm
7453  *  interrupt handler whenever a link change interrupt comes up.
7454  *  Return value:
7455  *  void.
7456  */
7457 
7458 static void s2io_link(struct s2io_nic *sp, int link)
7459 {
7460     struct net_device *dev = sp->dev;
7461     struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7462 
7463     if (link != sp->last_link_state) {
7464         init_tti(sp, link, false);
7465         if (link == LINK_DOWN) {
7466             DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7467             s2io_stop_all_tx_queue(sp);
7468             netif_carrier_off(dev);
7469             if (swstats->link_up_cnt)
7470                 swstats->link_up_time =
7471                     jiffies - sp->start_time;
7472             swstats->link_down_cnt++;
7473         } else {
7474             DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7475             if (swstats->link_down_cnt)
7476                 swstats->link_down_time =
7477                     jiffies - sp->start_time;
7478             swstats->link_up_cnt++;
7479             netif_carrier_on(dev);
7480             s2io_wake_all_tx_queue(sp);
7481         }
7482     }
7483     sp->last_link_state = link;
7484     sp->start_time = jiffies;
7485 }
7486 
7487 /**
7488  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7489  *  @sp : private member of the device structure, which is a pointer to the
7490  *  s2io_nic structure.
7491  *  Description:
7492  *  This function initializes a few of the PCI and PCI-X configuration registers
7493  *  with recommended values.
7494  *  Return value:
7495  *  void
7496  */
7497 
7498 static void s2io_init_pci(struct s2io_nic *sp)
7499 {
7500     u16 pci_cmd = 0, pcix_cmd = 0;
7501 
7502     /* Enable Data Parity Error Recovery in PCI-X command register. */
7503     pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7504                  &(pcix_cmd));
7505     pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7506                   (pcix_cmd | 1));
7507     pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7508                  &(pcix_cmd));
7509 
7510     /* Set the PErr Response bit in PCI command register. */
7511     pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7512     pci_write_config_word(sp->pdev, PCI_COMMAND,
7513                   (pci_cmd | PCI_COMMAND_PARITY));
7514     pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7515 }
7516 
7517 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7518                 u8 *dev_multiq)
7519 {
7520     int i;
7521 
7522     if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7523         DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7524               "(%d) not supported\n", tx_fifo_num);
7525 
7526         if (tx_fifo_num < 1)
7527             tx_fifo_num = 1;
7528         else
7529             tx_fifo_num = MAX_TX_FIFOS;
7530 
7531         DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7532     }
7533 
7534     if (multiq)
7535         *dev_multiq = multiq;
7536 
7537     if (tx_steering_type && (1 == tx_fifo_num)) {
7538         if (tx_steering_type != TX_DEFAULT_STEERING)
7539             DBG_PRINT(ERR_DBG,
7540                   "Tx steering is not supported with "
7541                   "one fifo. Disabling Tx steering.\n");
7542         tx_steering_type = NO_STEERING;
7543     }
7544 
7545     if ((tx_steering_type < NO_STEERING) ||
7546         (tx_steering_type > TX_DEFAULT_STEERING)) {
7547         DBG_PRINT(ERR_DBG,
7548               "Requested transmit steering not supported\n");
7549         DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7550         tx_steering_type = NO_STEERING;
7551     }
7552 
7553     if (rx_ring_num > MAX_RX_RINGS) {
7554         DBG_PRINT(ERR_DBG,
7555               "Requested number of rx rings not supported\n");
7556         DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7557               MAX_RX_RINGS);
7558         rx_ring_num = MAX_RX_RINGS;
7559     }
7560 
7561     if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7562         DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7563               "Defaulting to INTA\n");
7564         *dev_intr_type = INTA;
7565     }
7566 
7567     if ((*dev_intr_type == MSI_X) &&
7568         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7569          (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7570         DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7571               "Defaulting to INTA\n");
7572         *dev_intr_type = INTA;
7573     }
7574 
7575     if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7576         DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7577         DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7578         rx_ring_mode = 1;
7579     }
7580 
7581     for (i = 0; i < MAX_RX_RINGS; i++)
7582         if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7583             DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7584                   "supported\nDefaulting to %d\n",
7585                   MAX_RX_BLOCKS_PER_RING);
7586             rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7587         }
7588 
7589     return SUCCESS;
7590 }
7591 
7592 /**
7593  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
7594  * @nic: device private variable
 * @ds_codepoint: DS codepoint value (0-63) whose steering entry is programmed
7596  * @ring: ring index
7597  * Description: The function configures the receive steering to
7598  * desired receive ring.
7599  * Return Value:  SUCCESS on success and
7600  * '-1' on failure (endian settings incorrect).
7601  */
7602 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7603 {
7604     struct XENA_dev_config __iomem *bar0 = nic->bar0;
7605     register u64 val64 = 0;
7606 
7607     if (ds_codepoint > 63)
7608         return FAILURE;
7609 
7610     val64 = RTS_DS_MEM_DATA(ring);
7611     writeq(val64, &bar0->rts_ds_mem_data);
7612 
7613     val64 = RTS_DS_MEM_CTRL_WE |
7614         RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7615         RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7616 
7617     writeq(val64, &bar0->rts_ds_mem_ctrl);
7618 
7619     return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7620                      RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7621                      S2IO_BIT_RESET, true);
7622 }
7623 
/* net_device callbacks: wires the generic netdev interface to this
 * driver's open/close, xmit, ioctl, MTU/MAC/feature and watchdog
 * implementations. */
static const struct net_device_ops s2io_netdev_ops = {
    .ndo_open           = s2io_open,
    .ndo_stop           = s2io_close,
    .ndo_get_stats          = s2io_get_stats,
    .ndo_start_xmit     = s2io_xmit,
    .ndo_validate_addr  = eth_validate_addr,
    .ndo_set_rx_mode    = s2io_ndo_set_multicast,
    .ndo_eth_ioctl      = s2io_ioctl,
    .ndo_set_mac_address    = s2io_set_mac_addr,
    .ndo_change_mtu     = s2io_change_mtu,
    .ndo_set_features   = s2io_set_features,
    .ndo_tx_timeout     = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller    = s2io_netpoll,
#endif
};
7640 
/**
 *  s2io_init_nic - Initialization of the adapter.
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */
7654 
static int
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
    struct s2io_nic *sp;
    struct net_device *dev;
    int i, j, ret;
    u32 mac_up, mac_down;
    u64 val64 = 0, tmp64 = 0;
    struct XENA_dev_config __iomem *bar0 = NULL;
    u16 subid;
    struct config_param *config;
    struct mac_info *mac_control;
    int mode;
    u8 dev_intr_type = intr_type;
    u8 dev_multiq = 0;

    /* Validate/clamp module parameters before touching the device. */
    ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
    if (ret)
        return ret;

    ret = pci_enable_device(pdev);
    if (ret) {
        DBG_PRINT(ERR_DBG,
              "%s: pci_enable_device failed\n", __func__);
        return ret;
    }

    /* The adapter requires 64-bit DMA; there is no 32-bit fallback. */
    if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
        DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
    } else {
        pci_disable_device(pdev);
        return -ENOMEM;
    }
    ret = pci_request_regions(pdev, s2io_driver_name);
    if (ret) {
        DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
              __func__, ret);
        pci_disable_device(pdev);
        return -ENODEV;
    }
    /* Multiqueue netdev only when the multiq module parameter asked
     * for it (one Tx queue per FIFO). */
    if (dev_multiq)
        dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
    else
        dev = alloc_etherdev(sizeof(struct s2io_nic));
    if (dev == NULL) {
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        return -ENODEV;
    }

    pci_set_master(pdev);
    pci_set_drvdata(pdev, dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /*  Private member variable initialized to s2io NIC structure */
    sp = netdev_priv(dev);
    sp->dev = dev;
    sp->pdev = pdev;
    sp->device_enabled_once = false;
    if (rx_ring_mode == 1)
        sp->rxd_mode = RXD_MODE_1;
    if (rx_ring_mode == 2)
        sp->rxd_mode = RXD_MODE_3B;

    sp->config.intr_type = dev_intr_type;

    /* Herc device IDs identify Xframe II; everything else is Xframe I. */
    if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
        (pdev->device == PCI_DEVICE_ID_HERC_UNI))
        sp->device_type = XFRAME_II_DEVICE;
    else
        sp->device_type = XFRAME_I_DEVICE;


    /* Initialize some PCI/PCI-X fields of the NIC. */
    s2io_init_pci(sp);

    /*
     * Setting the device configuration parameters.
     * Most of these parameters can be specified by the user during
     * module insertion as they are module loadable parameters. If
     * these parameters are not specified during load time, they
     * are initialized with default values.
     */
    config = &sp->config;
    mac_control = &sp->mac_control;

    config->napi = napi;
    config->tx_steering_type = tx_steering_type;

    /* Tx side parameters. */
    if (config->tx_steering_type == TX_PRIORITY_STEERING)
        config->tx_fifo_num = MAX_TX_FIFOS;
    else
        config->tx_fifo_num = tx_fifo_num;

    /* Initialize the fifos used for tx steering */
    /* With fewer than 5 FIFOs, reserve the last one for UDP and give
     * the rest to TCP; with 5 or more, carve out fixed-size UDP and
     * "other" groups after the TCP FIFOs. */
    if (config->tx_fifo_num < 5) {
        if (config->tx_fifo_num  == 1)
            sp->total_tcp_fifos = 1;
        else
            sp->total_tcp_fifos = config->tx_fifo_num - 1;
        sp->udp_fifo_idx = config->tx_fifo_num - 1;
        sp->total_udp_fifos = 1;
        sp->other_fifo_idx = sp->total_tcp_fifos - 1;
    } else {
        sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
                       FIFO_OTHER_MAX_NUM);
        sp->udp_fifo_idx = sp->total_tcp_fifos;
        sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
        sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
    }

    config->multiq = dev_multiq;
    for (i = 0; i < config->tx_fifo_num; i++) {
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        tx_cfg->fifo_len = tx_fifo_len[i];
        tx_cfg->fifo_priority = i;
    }

    /* mapping the QoS priority to the configured fifos */
    for (i = 0; i < MAX_TX_FIFOS; i++)
        config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

    /* map the hashing selector table to the configured fifos */
    for (i = 0; i < config->tx_fifo_num; i++)
        sp->fifo_selector[i] = fifo_selector[i];


    /* Per-list Tx interrupts are needed when any FIFO is too short
     * for utilization-based interrupt moderation. */
    config->tx_intr_type = TXD_INT_TYPE_UTILZ;
    for (i = 0; i < config->tx_fifo_num; i++) {
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
        if (tx_cfg->fifo_len < 65) {
            config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
            break;
        }
    }
    /* + 2 because one Txd for skb->data and one Txd for UFO */
    config->max_txds = MAX_SKB_FRAGS + 2;

    /* Rx side parameters. */
    config->rx_ring_num = rx_ring_num;
    for (i = 0; i < config->rx_ring_num; i++) {
        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
        struct ring_info *ring = &mac_control->rings[i];

        rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
        rx_cfg->ring_priority = i;
        ring->rx_bufs_left = 0;
        ring->rxd_mode = sp->rxd_mode;
        ring->rxd_count = rxd_count[sp->rxd_mode];
        ring->pdev = sp->pdev;
        ring->dev = sp->dev;
    }

    for (i = 0; i < rx_ring_num; i++) {
        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

        rx_cfg->ring_org = RING_ORG_BUFF1;
        rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
    }

    /*  Setting Mac Control parameters */
    mac_control->rmac_pause_time = rmac_pause_time;
    mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
    mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


    /*  initialize the shared memory used by the NIC and the host */
    if (init_shared_mem(sp)) {
        DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
        ret = -ENOMEM;
        goto mem_alloc_failed;
    }

    /* BAR0: device configuration registers. */
    sp->bar0 = pci_ioremap_bar(pdev, 0);
    if (!sp->bar0) {
        DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
              dev->name);
        ret = -ENOMEM;
        goto bar0_remap_failed;
    }

    /* BAR1: Tx FIFO doorbell space. */
    sp->bar1 = pci_ioremap_bar(pdev, 2);
    if (!sp->bar1) {
        DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
              dev->name);
        ret = -ENOMEM;
        goto bar1_remap_failed;
    }

    /* Initializing the BAR1 address as the start of the FIFO pointer. */
    /* Each FIFO occupies a fixed 128KB window within BAR1. */
    for (j = 0; j < MAX_TX_FIFOS; j++) {
        mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
    }

    /*  Driver entry points */
    dev->netdev_ops = &s2io_netdev_ops;
    dev->ethtool_ops = &netdev_ethtool_ops;
    dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
        NETIF_F_TSO | NETIF_F_TSO6 |
        NETIF_F_RXCSUM | NETIF_F_LRO;
    dev->features |= dev->hw_features |
        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
        NETIF_F_HIGHDMA;
    dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
    INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
    INIT_WORK(&sp->set_link_task, s2io_set_link);

    pci_save_state(sp->pdev);

    /* Setting swapper control on the NIC, for proper reset operation */
    if (s2io_set_swapper(sp)) {
        DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
              dev->name);
        ret = -EAGAIN;
        goto set_swap_failed;
    }

    /* Verify if the Herc works on the slot its placed into */
    if (sp->device_type & XFRAME_II_DEVICE) {
        mode = s2io_verify_pci_mode(sp);
        if (mode < 0) {
            DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
                  __func__);
            ret = -EBADSLT;
            goto set_swap_failed;
        }
    }

    /* Probe MSI-X: enable, self-test the vectors, then tear them
     * down again (add_isr() re-enables them at open time). Fall
     * back to INTA on any failure. */
    if (sp->config.intr_type == MSI_X) {
        sp->num_entries = config->rx_ring_num + 1;
        ret = s2io_enable_msi_x(sp);

        if (!ret) {
            ret = s2io_test_msi(sp);
            /* rollback MSI-X, will re-enable during add_isr() */
            remove_msix_isr(sp);
        }
        if (ret) {

            DBG_PRINT(ERR_DBG,
                  "MSI-X requested but failed to enable\n");
            sp->config.intr_type = INTA;
        }
    }

    /* One NAPI context per Rx ring under MSI-X; a single shared
     * context under INTA. */
    if (config->intr_type ==  MSI_X) {
        for (i = 0; i < config->rx_ring_num ; i++) {
            struct ring_info *ring = &mac_control->rings[i];

            netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
        }
    } else {
        netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
    }

    /* Not needed for Herc */
    if (sp->device_type & XFRAME_I_DEVICE) {
        /*
         * Fix for all "FFs" MAC address problems observed on
         * Alpha platforms
         */
        fix_mac_address(sp);
        s2io_reset(sp);
    }

    /*
     * MAC address initialization.
     * For now only one mac address will be read and used.
     */
    bar0 = sp->bar0;
    val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
        RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
    writeq(val64, &bar0->rmac_addr_cmd_mem);
    wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                  S2IO_BIT_RESET, true);
    tmp64 = readq(&bar0->rmac_addr_data0_mem);
    mac_down = (u32)tmp64;
    mac_up = (u32) (tmp64 >> 32);

    /* Unpack the 48-bit MAC from the two 32-bit register halves. */
    sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
    sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
    sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
    sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
    sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
    sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

    /*  Set the factory defined MAC address initially   */
    dev->addr_len = ETH_ALEN;
    eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);

    /* initialize number of multicast & unicast MAC entries variables */
    if (sp->device_type == XFRAME_I_DEVICE) {
        config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
        config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
        config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
    } else if (sp->device_type == XFRAME_II_DEVICE) {
        config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
        config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
        config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
    }

    /* MTU range: 46 - 9600 */
    dev->min_mtu = MIN_MTU;
    dev->max_mtu = S2IO_JUMBO_SIZE;

    /* store mac addresses from CAM to s2io_nic structure */
    do_s2io_store_unicast_mc(sp);

    /* Configure MSIX vector for number of rings configured plus one */
    if ((sp->device_type == XFRAME_II_DEVICE) &&
        (config->intr_type == MSI_X))
        sp->num_entries = config->rx_ring_num + 1;

    /* Store the values of the MSIX table in the s2io_nic structure */
    store_xmsi_data(sp);
    /* reset Nic and bring it to known state */
    s2io_reset(sp);

    /*
     * Initialize link state flags
     * and the card state parameter
     */
    sp->state = 0;

    /* Initialize spinlocks */
    for (i = 0; i < sp->config.tx_fifo_num; i++) {
        struct fifo_info *fifo = &mac_control->fifos[i];

        spin_lock_init(&fifo->tx_lock);
    }

    /*
     * SXE-002: Configure link and activity LED to init state
     * on driver load.
     */
    subid = sp->pdev->subsystem_device;
    if ((subid & 0xFF) >= 0x07) {
        val64 = readq(&bar0->gpio_control);
        val64 |= 0x0000800000000000ULL;
        writeq(val64, &bar0->gpio_control);
        val64 = 0x0411040400000000ULL;
        writeq(val64, (void __iomem *)bar0 + 0x2700);
        val64 = readq(&bar0->gpio_control);
    }

    sp->rx_csum = 1;    /* Rx chksum verify enabled by default */

    if (register_netdev(dev)) {
        DBG_PRINT(ERR_DBG, "Device registration failed\n");
        ret = -ENODEV;
        goto register_failed;
    }
    s2io_vpd_read(sp);
    DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
    DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
          sp->product_name, pdev->revision);
    DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
          s2io_driver_version);
    DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
    DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
    if (sp->device_type & XFRAME_II_DEVICE) {
        mode = s2io_print_pci_mode(sp);
        if (mode < 0) {
            ret = -EBADSLT;
            unregister_netdev(dev);
            goto set_swap_failed;
        }
    }
    switch (sp->rxd_mode) {
    case RXD_MODE_1:
        DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
              dev->name);
        break;
    case RXD_MODE_3B:
        DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
              dev->name);
        break;
    }

    switch (sp->config.napi) {
    case 0:
        DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
        break;
    case 1:
        DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
        break;
    }

    DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
          sp->config.tx_fifo_num);

    DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
          sp->config.rx_ring_num);

    switch (sp->config.intr_type) {
    case INTA:
        DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
        break;
    case MSI_X:
        DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
        break;
    }
    if (sp->config.multiq) {
        for (i = 0; i < sp->config.tx_fifo_num; i++) {
            struct fifo_info *fifo = &mac_control->fifos[i];

            fifo->multiq = config->multiq;
        }
        DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
              dev->name);
    } else
        DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
              dev->name);

    switch (sp->config.tx_steering_type) {
    case NO_STEERING:
        DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
              dev->name);
        break;
    case TX_PRIORITY_STEERING:
        DBG_PRINT(ERR_DBG,
              "%s: Priority steering enabled for transmit\n",
              dev->name);
        break;
    case TX_DEFAULT_STEERING:
        DBG_PRINT(ERR_DBG,
              "%s: Default steering enabled for transmit\n",
              dev->name);
    }

    DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
          dev->name);
    /* Initialize device name */
    snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
         sp->product_name);

    if (vlan_tag_strip)
        sp->vlan_strip_flag = 1;
    else
        sp->vlan_strip_flag = 0;

    /*
     * Make Link state as off at this point, when the Link change
     * interrupt comes the state will be automatically changed to
     * the right state.
     */
    netif_carrier_off(dev);

    return 0;

    /* Error unwind: each label falls through to undo the steps that
     * succeeded before the failure point. */
register_failed:
set_swap_failed:
    iounmap(sp->bar1);
bar1_remap_failed:
    iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
    free_shared_mem(sp);
    pci_disable_device(pdev);
    pci_release_regions(pdev);
    free_netdev(dev);

    return ret;
}
8124 
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */
8133 
static void s2io_rem_nic(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct s2io_nic *sp;

    if (dev == NULL) {
        DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
        return;
    }

    sp = netdev_priv(dev);

    /* Stop deferred work items before tearing the device down. */
    cancel_work_sync(&sp->rst_timer_task);
    cancel_work_sync(&sp->set_link_task);

    unregister_netdev(dev);

    /* Release resources in reverse order of probe-time acquisition. */
    free_shared_mem(sp);
    iounmap(sp->bar0);
    iounmap(sp->bar1);
    pci_release_regions(pdev);
    free_netdev(dev);
    pci_disable_device(pdev);
}
8158 
/* Generate module init/exit that register/unregister the PCI driver. */
module_pci_driver(s2io_driver);
8160 
8161 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8162                 struct tcphdr **tcp, struct RxD_t *rxdp,
8163                 struct s2io_nic *sp)
8164 {
8165     int ip_off;
8166     u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8167 
8168     if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8169         DBG_PRINT(INIT_DBG,
8170               "%s: Non-TCP frames not supported for LRO\n",
8171               __func__);
8172         return -1;
8173     }
8174 
8175     /* Checking for DIX type or DIX type with VLAN */
8176     if ((l2_type == 0) || (l2_type == 4)) {
8177         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8178         /*
8179          * If vlan stripping is disabled and the frame is VLAN tagged,
8180          * shift the offset by the VLAN header size bytes.
8181          */
8182         if ((!sp->vlan_strip_flag) &&
8183             (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8184             ip_off += HEADER_VLAN_SIZE;
8185     } else {
8186         /* LLC, SNAP etc are considered non-mergeable */
8187         return -1;
8188     }
8189 
8190     *ip = (struct iphdr *)(buffer + ip_off);
8191     ip_len = (u8)((*ip)->ihl);
8192     ip_len <<= 2;
8193     *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8194 
8195     return 0;
8196 }
8197 
8198 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8199                   struct tcphdr *tcp)
8200 {
8201     DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8202     if ((lro->iph->saddr != ip->saddr) ||
8203         (lro->iph->daddr != ip->daddr) ||
8204         (lro->tcph->source != tcp->source) ||
8205         (lro->tcph->dest != tcp->dest))
8206         return -1;
8207     return 0;
8208 }
8209 
8210 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8211 {
8212     return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8213 }
8214 
8215 static void initiate_new_session(struct lro *lro, u8 *l2h,
8216                  struct iphdr *ip, struct tcphdr *tcp,
8217                  u32 tcp_pyld_len, u16 vlan_tag)
8218 {
8219     DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8220     lro->l2h = l2h;
8221     lro->iph = ip;
8222     lro->tcph = tcp;
8223     lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8224     lro->tcp_ack = tcp->ack_seq;
8225     lro->sg_num = 1;
8226     lro->total_len = ntohs(ip->tot_len);
8227     lro->frags_len = 0;
8228     lro->vlan_tag = vlan_tag;
8229     /*
8230      * Check if we saw TCP timestamp.
8231      * Other consistency checks have already been done.
8232      */
8233     if (tcp->doff == 8) {
8234         __be32 *ptr;
8235         ptr = (__be32 *)(tcp+1);
8236         lro->saw_ts = 1;
8237         lro->cur_tsval = ntohl(*(ptr+1));
8238         lro->cur_tsecr = *(ptr+2);
8239     }
8240     lro->in_use = 1;
8241 }
8242 
8243 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8244 {
8245     struct iphdr *ip = lro->iph;
8246     struct tcphdr *tcp = lro->tcph;
8247     struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8248 
8249     DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8250 
8251     /* Update L3 header */
8252     csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8253     ip->tot_len = htons(lro->total_len);
8254 
8255     /* Update L4 header */
8256     tcp->ack_seq = lro->tcp_ack;
8257     tcp->window = lro->window;
8258 
8259     /* Update tsecr field if this session has timestamps enabled */
8260     if (lro->saw_ts) {
8261         __be32 *ptr = (__be32 *)(tcp + 1);
8262         *(ptr+2) = lro->cur_tsecr;
8263     }
8264 
8265     /* Update counters required for calculation of
8266      * average no. of packets aggregated.
8267      */
8268     swstats->sum_avg_pkts_aggregated += lro->sg_num;
8269     swstats->num_aggregations++;
8270 }
8271 
8272 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8273                  struct tcphdr *tcp, u32 l4_pyld)
8274 {
8275     DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8276     lro->total_len += l4_pyld;
8277     lro->frags_len += l4_pyld;
8278     lro->tcp_next_seq += l4_pyld;
8279     lro->sg_num++;
8280 
8281     /* Update ack seq no. and window ad(from this pkt) in LRO object */
8282     lro->tcp_ack = tcp->ack_seq;
8283     lro->window = tcp->window;
8284 
8285     if (lro->saw_ts) {
8286         __be32 *ptr;
8287         /* Update tsecr and tsval from this packet */
8288         ptr = (__be32 *)(tcp+1);
8289         lro->cur_tsval = ntohl(*(ptr+1));
8290         lro->cur_tsecr = *(ptr + 2);
8291     }
8292 }
8293 
/*
 * verify_l3_l4_lro_capable - decide if a TCP segment may be aggregated.
 * Returns 0 when mergeable, -1 otherwise. When @l_lro is non-NULL the
 * segment is also checked for timestamp monotonicity against that session.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
                    struct tcphdr *tcp, u32 tcp_pyld_len)
{
    u8 *ptr;

    DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

    if (!tcp_pyld_len) {
        /* Runt frame or a pure ack */
        return -1;
    }

    if (ip->ihl != 5) /* IP has options */
        return -1;

    /* If we see CE codepoint in IP header, packet is not mergeable */
    if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
        return -1;

    /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
    if (tcp->urg || tcp->psh || tcp->rst ||
        tcp->syn || tcp->fin ||
        tcp->ece || tcp->cwr || !tcp->ack) {
        /*
         * Currently recognize only the ack control word and
         * any other control field being set would result in
         * flushing the LRO session
         */
        return -1;
    }

    /*
     * Allow only one TCP timestamp option. Don't aggregate if
     * any other options are detected.
     * doff == 5 is a bare header; doff == 8 leaves exactly 12 option
     * bytes, enough for NOPs plus one 10-byte timestamp option.
     */
    if (tcp->doff != 5 && tcp->doff != 8)
        return -1;

    if (tcp->doff == 8) {
        /* ptr walks the option bytes immediately after the header. */
        ptr = (u8 *)(tcp + 1);
        while (*ptr == TCPOPT_NOP)
            ptr++;
        if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
            return -1;

        /* Ensure timestamp value increases monotonically */
        /* ptr+2 is the 4-byte TSval field (past kind and length).
         * NOTE(review): these 32-bit loads may be unaligned -
         * presumably tolerated on the supported platforms; confirm. */
        if (l_lro)
            if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
                return -1;

        /* timestamp echo reply should be non-zero */
        /* ptr+6 is the 4-byte TSecr field. */
        if (*((__be32 *)(ptr+6)) == 0)
            return -1;
    }

    return 0;
}
8351 
/*
 * s2io_club_tcp_session - LRO classification for a received TCP segment.
 * Locates the IP/TCP headers in @buffer, matches the segment against the
 * ring's LRO session table and returns an action code:
 *   0 - all LRO sessions in use; *lro set to NULL (send frame up as-is)
 *   1 - segment aggregated into the matched session (*lro)
 *   2 - out-of-sequence or unmergeable segment: flush the matched session
 *   3 - new session initiated for this flow in *lro
 *   4 - aggregated and session reached lro_max_aggr_per_sess: flush it
 *   5 - segment itself not L3/L4 aggregatable: send up, no session made
 * Negative values propagate from check_L2_lro_capable().
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
                 u8 **tcp, u32 *tcp_len, struct lro **lro,
                 struct RxD_t *rxdp, struct s2io_nic *sp)
{
    struct iphdr *ip;
    struct tcphdr *tcph;
    int ret = 0, i;
    u16 vlan_tag = 0;
    struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

    /* Locate headers; rejects non-TCP and non-DIX frames outright. */
    ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                   rxdp, sp);
    if (ret)
        return ret;

    DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

    vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
    tcph = (struct tcphdr *)*tcp;
    *tcp_len = get_l4_pyld_length(ip, tcph);
    /* First pass: look for an in-use session matching this 4-tuple. */
    for (i = 0; i < MAX_LRO_SESSIONS; i++) {
        struct lro *l_lro = &ring_data->lro0_n[i];
        if (l_lro->in_use) {
            if (check_for_socket_match(l_lro, ip, tcph))
                continue;
            /* Sock pair matched */
            *lro = l_lro;

            if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
                      "expected 0x%x, actual 0x%x\n",
                      __func__,
                      (*lro)->tcp_next_seq,
                      ntohl(tcph->seq));

                swstats->outof_sequence_pkts++;
                ret = 2;
                break;
            }

            if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
                              *tcp_len))
                ret = 1; /* Aggregate */
            else
                ret = 2; /* Flush both */
            break;
        }
    }

    if (ret == 0) {
        /* Before searching for available LRO objects,
         * check if the pkt is L3/L4 aggregatable. If not
         * don't create new LRO session. Just send this
         * packet up.
         */
        if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
            return 5;

        /* Second pass: claim a free session slot for this flow. */
        for (i = 0; i < MAX_LRO_SESSIONS; i++) {
            struct lro *l_lro = &ring_data->lro0_n[i];
            if (!(l_lro->in_use)) {
                *lro = l_lro;
                ret = 3; /* Begin anew */
                break;
            }
        }
    }

    if (ret == 0) { /* sessions exceeded */
        DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
              __func__);
        *lro = NULL;
        return ret;
    }

    /* Act on the decision made above. */
    switch (ret) {
    case 3:
        initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
                     vlan_tag);
        break;
    case 2:
        update_L3L4_header(sp, *lro);
        break;
    case 1:
        aggregate_new_rx(*lro, ip, tcph, *tcp_len);
        if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
            update_L3L4_header(sp, *lro);
            ret = 4; /* Flush the LRO */
        }
        break;
    default:
        DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
        break;
    }

    return ret;
}
8449 
8450 static void clear_lro_session(struct lro *lro)
8451 {
8452     static u16 lro_struct_size = sizeof(struct lro);
8453 
8454     memset(lro, 0, lro_struct_size);
8455 }
8456 
8457 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8458 {
8459     struct net_device *dev = skb->dev;
8460     struct s2io_nic *sp = netdev_priv(dev);
8461 
8462     skb->protocol = eth_type_trans(skb, dev);
8463     if (vlan_tag && sp->vlan_strip_flag)
8464         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8465     if (sp->config.napi)
8466         netif_receive_skb(skb);
8467     else
8468         netif_rx(skb);
8469 }
8470 
8471 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8472                struct sk_buff *skb, u32 tcp_len)
8473 {
8474     struct sk_buff *first = lro->parent;
8475     struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8476 
8477     first->len += tcp_len;
8478     first->data_len = lro->frags_len;
8479     skb_pull(skb, (skb->len - tcp_len));
8480     if (skb_shinfo(first)->frag_list)
8481         lro->last_frag->next = skb;
8482     else
8483         skb_shinfo(first)->frag_list = skb;
8484     first->truesize += skb->truesize;
8485     lro->last_frag = skb;
8486     swstats->clubbed_frms_cnt++;
8487 }
8488 
8489 /**
8490  * s2io_io_error_detected - called when PCI error is detected
8491  * @pdev: Pointer to PCI device
8492  * @state: The current pci connection state
8493  *
8494  * This function is called after a PCI bus error affecting
8495  * this device has been detected.
8496  */
8497 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8498                            pci_channel_state_t state)
8499 {
8500     struct net_device *netdev = pci_get_drvdata(pdev);
8501     struct s2io_nic *sp = netdev_priv(netdev);
8502 
8503     netif_device_detach(netdev);
8504 
8505     if (state == pci_channel_io_perm_failure)
8506         return PCI_ERS_RESULT_DISCONNECT;
8507 
8508     if (netif_running(netdev)) {
8509         /* Bring down the card, while avoiding PCI I/O */
8510         do_s2io_card_down(sp, 0);
8511     }
8512     pci_disable_device(pdev);
8513 
8514     return PCI_ERS_RESULT_NEED_RESET;
8515 }
8516 
8517 /**
8518  * s2io_io_slot_reset - called after the pci bus has been reset.
8519  * @pdev: Pointer to PCI device
8520  *
8521  * Restart the card from scratch, as if from a cold-boot.
8522  * At this point, the card has experienced a hard reset,
8523  * followed by fixups by BIOS, and has its config space
8524  * set up identically to what it was at cold boot.
8525  */
8526 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8527 {
8528     struct net_device *netdev = pci_get_drvdata(pdev);
8529     struct s2io_nic *sp = netdev_priv(netdev);
8530 
8531     if (pci_enable_device(pdev)) {
8532         pr_err("Cannot re-enable PCI device after reset.\n");
8533         return PCI_ERS_RESULT_DISCONNECT;
8534     }
8535 
8536     pci_set_master(pdev);
8537     s2io_reset(sp);
8538 
8539     return PCI_ERS_RESULT_RECOVERED;
8540 }
8541 
8542 /**
8543  * s2io_io_resume - called when traffic can start flowing again.
8544  * @pdev: Pointer to PCI device
8545  *
8546  * This callback is called when the error recovery driver tells
8547  * us that it's OK to resume normal operation.
8548  */
8549 static void s2io_io_resume(struct pci_dev *pdev)
8550 {
8551     struct net_device *netdev = pci_get_drvdata(pdev);
8552     struct s2io_nic *sp = netdev_priv(netdev);
8553 
8554     if (netif_running(netdev)) {
8555         if (s2io_card_up(sp)) {
8556             pr_err("Can't bring device back up after reset.\n");
8557             return;
8558         }
8559 
8560         if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
8561             s2io_card_down(sp);
8562             pr_err("Can't restore mac addr after reset.\n");
8563             return;
8564         }
8565     }
8566 
8567     netif_device_attach(netdev);
8568     netif_tx_wake_all_queues(netdev);
8569 }