// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010-2011 Calxeda, Inc.
 */
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* XGMAC Register definitions */
#define XGMAC_CONTROL       0x00000000  /* MAC Configuration */
#define XGMAC_FRAME_FILTER  0x00000004  /* MAC Frame Filter */
#define XGMAC_FLOW_CTRL     0x00000018  /* MAC Flow Control */
#define XGMAC_VLAN_TAG      0x0000001C  /* VLAN Tags */
#define XGMAC_VERSION       0x00000020  /* Version */
#define XGMAC_VLAN_INCL     0x00000024  /* VLAN tag for tx frames */
#define XGMAC_LPI_CTRL      0x00000028  /* LPI Control and Status */
#define XGMAC_LPI_TIMER     0x0000002C  /* LPI Timers Control */
#define XGMAC_TX_PACE       0x00000030  /* Transmit Pace and Stretch */
#define XGMAC_VLAN_HASH     0x00000034  /* VLAN Hash Table */
#define XGMAC_DEBUG     0x00000038  /* Debug */
#define XGMAC_INT_STAT      0x0000003C  /* Interrupt and Control */
#define XGMAC_ADDR_HIGH(reg)    (0x00000040 + ((reg) * 8))
#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
#define XGMAC_HASH(n)       (0x00000300 + (n) * 4) /* HASH table regs */
#define XGMAC_NUM_HASH      16
#define XGMAC_OMR       0x00000400
#define XGMAC_REMOTE_WAKE   0x00000700  /* Remote Wake-Up Frm Filter */
#define XGMAC_PMT       0x00000704  /* PMT Control and Status */
#define XGMAC_MMC_CTRL      0x00000800  /* XGMAC MMC Control */
#define XGMAC_MMC_INTR_RX   0x00000804  /* Receive Interrupt */
#define XGMAC_MMC_INTR_TX   0x00000808  /* Transmit Interrupt */
#define XGMAC_MMC_INTR_MASK_RX  0x0000080c  /* Receive Interrupt Mask */
#define XGMAC_MMC_INTR_MASK_TX  0x00000810  /* Transmit Interrupt Mask */

/* Hardware TX Statistics Counters */
#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
#define XGMAC_MMC_TXBCFRAME_G   0x00000824
#define XGMAC_MMC_TXMCFRAME_G   0x0000082C
#define XGMAC_MMC_TXUCFRAME_GB  0x00000864
#define XGMAC_MMC_TXMCFRAME_GB  0x0000086C
#define XGMAC_MMC_TXBCFRAME_GB  0x00000874
#define XGMAC_MMC_TXUNDERFLOW   0x0000087C
#define XGMAC_MMC_TXOCTET_G_LO  0x00000884
#define XGMAC_MMC_TXOCTET_G_HI  0x00000888
#define XGMAC_MMC_TXFRAME_G_LO  0x0000088C
#define XGMAC_MMC_TXFRAME_G_HI  0x00000890
#define XGMAC_MMC_TXPAUSEFRAME  0x00000894
#define XGMAC_MMC_TXVLANFRAME   0x0000089C

/* Hardware RX Statistics Counters */
#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
#define XGMAC_MMC_RXOCTET_G_LO  0x00000910
#define XGMAC_MMC_RXOCTET_G_HI  0x00000914
#define XGMAC_MMC_RXBCFRAME_G   0x00000918
#define XGMAC_MMC_RXMCFRAME_G   0x00000920
#define XGMAC_MMC_RXCRCERR  0x00000928
#define XGMAC_MMC_RXRUNT    0x00000930
#define XGMAC_MMC_RXJABBER  0x00000934
#define XGMAC_MMC_RXUCFRAME_G   0x00000970
#define XGMAC_MMC_RXLENGTHERR   0x00000978
#define XGMAC_MMC_RXPAUSEFRAME  0x00000988
#define XGMAC_MMC_RXOVERFLOW    0x00000990
#define XGMAC_MMC_RXVLANFRAME   0x00000998
#define XGMAC_MMC_RXWATCHDOG    0x000009a0

/* DMA Control and Status Registers */
#define XGMAC_DMA_BUS_MODE  0x00000f00  /* Bus Mode */
#define XGMAC_DMA_TX_POLL   0x00000f04  /* Transmit Poll Demand */
#define XGMAC_DMA_RX_POLL   0x00000f08  /* Received Poll Demand */
#define XGMAC_DMA_RX_BASE_ADDR  0x00000f0c  /* Receive List Base */
#define XGMAC_DMA_TX_BASE_ADDR  0x00000f10  /* Transmit List Base */
#define XGMAC_DMA_STATUS    0x00000f14  /* Status Register */
#define XGMAC_DMA_CONTROL   0x00000f18  /* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA  0x00000f1c  /* Interrupt Enable */
#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24  /* RX Intr Watchdog Timer */
#define XGMAC_DMA_AXI_BUS   0x00000f28  /* AXI Bus Mode */
#define XGMAC_DMA_AXI_STATUS    0x00000f2C  /* AXI Status */
#define XGMAC_DMA_HW_FEATURE    0x00000f58  /* Enabled Hardware Features */

#define XGMAC_ADDR_AE       0x80000000

/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET 0x80000000
#define XGMAC_PMT_GLBL_UNICAST  0x00000200
#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
#define XGMAC_PMT_MAGIC_PKT 0x00000020
#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
#define XGMAC_PMT_MAGIC_PKT_EN  0x00000002
#define XGMAC_PMT_POWERDOWN 0x00000001

#define XGMAC_CONTROL_SPD   0x40000000  /* Speed control */
#define XGMAC_CONTROL_SPD_MASK  0x60000000
#define XGMAC_CONTROL_SPD_1G    0x60000000
#define XGMAC_CONTROL_SPD_2_5G  0x40000000
#define XGMAC_CONTROL_SPD_10G   0x00000000
#define XGMAC_CONTROL_SARC  0x10000000  /* Source Addr Insert/Replace */
#define XGMAC_CONTROL_SARK_MASK 0x18000000
#define XGMAC_CONTROL_CAR   0x04000000  /* CRC Addition/Replacement */
#define XGMAC_CONTROL_CAR_MASK  0x06000000
#define XGMAC_CONTROL_DP    0x01000000  /* Disable Padding */
#define XGMAC_CONTROL_WD    0x00800000  /* Disable Watchdog on rx */
#define XGMAC_CONTROL_JD    0x00400000  /* Jabber disable */
#define XGMAC_CONTROL_JE    0x00100000  /* Jumbo frame */
#define XGMAC_CONTROL_LM    0x00001000  /* Loop-back mode */
#define XGMAC_CONTROL_IPC   0x00000400  /* Checksum Offload */
#define XGMAC_CONTROL_ACS   0x00000080  /* Automatic Pad/FCS Strip */
#define XGMAC_CONTROL_DDIC  0x00000010  /* Disable Deficit Idle Count */
#define XGMAC_CONTROL_TE    0x00000008  /* Transmitter Enable */
#define XGMAC_CONTROL_RE    0x00000004  /* Receiver Enable */

/* XGMAC Frame Filter defines */
#define XGMAC_FRAME_FILTER_PR   0x00000001  /* Promiscuous Mode */
#define XGMAC_FRAME_FILTER_HUC  0x00000002  /* Hash Unicast */
#define XGMAC_FRAME_FILTER_HMC  0x00000004  /* Hash Multicast */
#define XGMAC_FRAME_FILTER_DAIF 0x00000008  /* DA Inverse Filtering */
#define XGMAC_FRAME_FILTER_PM   0x00000010  /* Pass all multicast */
#define XGMAC_FRAME_FILTER_DBF  0x00000020  /* Disable Broadcast frames */
#define XGMAC_FRAME_FILTER_SAIF 0x00000100  /* SA Inverse Filtering */
#define XGMAC_FRAME_FILTER_SAF  0x00000200  /* Source Address Filter */
#define XGMAC_FRAME_FILTER_HPF  0x00000400  /* Hash or perfect Filter */
#define XGMAC_FRAME_FILTER_VHF  0x00000800  /* VLAN Hash Filter */
#define XGMAC_FRAME_FILTER_VPF  0x00001000  /* VLAN Perfect Filter */
#define XGMAC_FRAME_FILTER_RA   0x80000000  /* Receive all mode */

/* XGMAC FLOW CTRL defines */
#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000  /* Pause Time Mask */
#define XGMAC_FLOW_CTRL_PT_SHIFT    16
#define XGMAC_FLOW_CTRL_DZQP    0x00000080  /* Disable Zero-Quanta Phase */
#define XGMAC_FLOW_CTRL_PLT 0x00000020  /* Pause Low Threshold */
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
#define XGMAC_FLOW_CTRL_UP  0x00000008  /* Unicast Pause Frame Detect */
#define XGMAC_FLOW_CTRL_RFE 0x00000004  /* Rx Flow Control Enable */
#define XGMAC_FLOW_CTRL_TFE 0x00000002  /* Tx Flow Control Enable */
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001  /* Flow Control Busy ... */

/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMTIM    0x00800000  /* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT  0x0080      /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI  0x0040      /* LPI Interrupt Status */

/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET  0x00000001  /* Software Reset */
#define DMA_BUS_MODE_DSL_MASK   0x0000007c  /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT  2       /* (in DWORDS) */
#define DMA_BUS_MODE_ATDS   0x00000080  /* Alternate Descriptor Size */

/* Programmable burst length */
#define DMA_BUS_MODE_PBL_MASK   0x00003f00  /* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT  8
#define DMA_BUS_MODE_FB     0x00010000  /* Fixed burst */
#define DMA_BUS_MODE_RPBL_MASK  0x003e0000  /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP    0x00800000
#define DMA_BUS_MODE_8PBL   0x01000000
#define DMA_BUS_MODE_AAL    0x02000000

/* DMA Bus Mode register defines */
#define DMA_BUS_PR_RATIO_MASK   0x0000c000  /* Rx/Tx priority ratio */
#define DMA_BUS_PR_RATIO_SHIFT  14
#define DMA_BUS_FB      0x00010000  /* Fixed Burst */

/* DMA Control register defines */
#define DMA_CONTROL_ST      0x00002000  /* Start/Stop Transmission */
#define DMA_CONTROL_SR      0x00000002  /* Start/Stop Receive */
#define DMA_CONTROL_DFF     0x01000000  /* Disable flush of rx frames */
#define DMA_CONTROL_OSF     0x00000004  /* Operate on 2nd tx frame */

/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE    0x00010000  /* Normal Summary */
#define DMA_INTR_ENA_AIE    0x00008000  /* Abnormal Summary */
#define DMA_INTR_ENA_ERE    0x00004000  /* Early Receive */
#define DMA_INTR_ENA_FBE    0x00002000  /* Fatal Bus Error */
#define DMA_INTR_ENA_ETE    0x00000400  /* Early Transmit */
#define DMA_INTR_ENA_RWE    0x00000200  /* Receive Watchdog */
#define DMA_INTR_ENA_RSE    0x00000100  /* Receive Stopped */
#define DMA_INTR_ENA_RUE    0x00000080  /* Receive Buffer Unavailable */
#define DMA_INTR_ENA_RIE    0x00000040  /* Receive Interrupt */
#define DMA_INTR_ENA_UNE    0x00000020  /* Tx Underflow */
#define DMA_INTR_ENA_OVE    0x00000010  /* Receive Overflow */
#define DMA_INTR_ENA_TJE    0x00000008  /* Transmit Jabber */
#define DMA_INTR_ENA_TUE    0x00000004  /* Transmit Buffer Unavail */
#define DMA_INTR_ENA_TSE    0x00000002  /* Transmit Stopped */
#define DMA_INTR_ENA_TIE    0x00000001  /* Transmit Interrupt */

#define DMA_INTR_NORMAL     (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
                 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)

#define DMA_INTR_ABNORMAL   (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
                 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
                 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
                 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
                 DMA_INTR_ENA_TSE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK   (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

/* DMA Status register defines */
#define DMA_STATUS_GMI      0x08000000  /* MMC interrupt */
#define DMA_STATUS_GLI      0x04000000  /* GMAC Line interface int */
#define DMA_STATUS_EB_MASK  0x00380000  /* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT  0x00080000  /* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT  0x00100000  /* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK  0x00700000  /* Transmit Process State */
#define DMA_STATUS_TS_SHIFT 20
#define DMA_STATUS_RS_MASK  0x000e0000  /* Receive Process State */
#define DMA_STATUS_RS_SHIFT 17
#define DMA_STATUS_NIS      0x00010000  /* Normal Interrupt Summary */
#define DMA_STATUS_AIS      0x00008000  /* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI      0x00004000  /* Early Receive Interrupt */
#define DMA_STATUS_FBI      0x00002000  /* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI      0x00000400  /* Early Transmit Interrupt */
#define DMA_STATUS_RWT      0x00000200  /* Receive Watchdog Timeout */
#define DMA_STATUS_RPS      0x00000100  /* Receive Process Stopped */
#define DMA_STATUS_RU       0x00000080  /* Receive Buffer Unavailable */
#define DMA_STATUS_RI       0x00000040  /* Receive Interrupt */
#define DMA_STATUS_UNF      0x00000020  /* Transmit Underflow */
#define DMA_STATUS_OVF      0x00000010  /* Receive Overflow */
#define DMA_STATUS_TJT      0x00000008  /* Transmit Jabber Timeout */
#define DMA_STATUS_TU       0x00000004  /* Transmit Buffer Unavail */
#define DMA_STATUS_TPS      0x00000002  /* Transmit Process Stopped */
#define DMA_STATUS_TI       0x00000001  /* Transmit Interrupt */

/* Common MAC defines */
#define MAC_ENABLE_TX       0x00000008  /* Transmitter Enable */
#define MAC_ENABLE_RX       0x00000004  /* Receiver Enable */

/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF       0x00200000  /* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF       0x00100000  /* Flush Transmit FIFO */
#define XGMAC_OMR_TTC       0x00020000  /* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK  0x00030000
#define XGMAC_OMR_RFD       0x00006000  /* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK  0x00007000  /* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA       0x00000600  /* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK  0x00000E00  /* FC Act Threshold MASK */
#define XGMAC_OMR_EFC       0x00000100  /* Enable Hardware FC */
#define XGMAC_OMR_FEF       0x00000080  /* Forward Error Frames */
#define XGMAC_OMR_DT        0x00000040  /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF       0x00000020  /* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC_256   0x00000018  /* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK  0x00000018  /* RX Threshold Ctrl MASK */

/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL    0x00010000  /* TX Checksum offload */

#define XGMAC_MMC_CTRL_CNT_FRZ  0x00000008

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ     (0x2000 - 8)
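/* 8184 = 0x2000 - 8: presumably the largest 8-byte-aligned length that
 * still fits in the 13-bit per-buffer size fields defined below
 * (DESC_BUFFER1_SZ_MASK = 0x1fff, i.e. at most 8191 bytes per buffer).
 */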

#define RXDESC_EXT_STATUS   0x00000001
#define RXDESC_CRC_ERR      0x00000002
#define RXDESC_RX_ERR       0x00000008
#define RXDESC_RX_WDOG      0x00000010
#define RXDESC_FRAME_TYPE   0x00000020
#define RXDESC_GIANT_FRAME  0x00000080
#define RXDESC_LAST_SEG     0x00000100
#define RXDESC_FIRST_SEG    0x00000200
#define RXDESC_VLAN_FRAME   0x00000400
#define RXDESC_OVERFLOW_ERR 0x00000800
#define RXDESC_LENGTH_ERR   0x00001000
#define RXDESC_SA_FILTER_FAIL   0x00002000
#define RXDESC_DESCRIPTOR_ERR   0x00004000
#define RXDESC_ERROR_SUMMARY    0x00008000
#define RXDESC_FRAME_LEN_OFFSET 16
#define RXDESC_FRAME_LEN_MASK   0x3fff0000
#define RXDESC_DA_FILTER_FAIL   0x40000000

#define RXDESC1_END_RING    0x00008000

#define RXDESC_IP_PAYLOAD_MASK  0x00000003
#define RXDESC_IP_PAYLOAD_UDP   0x00000001
#define RXDESC_IP_PAYLOAD_TCP   0x00000002
#define RXDESC_IP_PAYLOAD_ICMP  0x00000003
#define RXDESC_IP_HEADER_ERR    0x00000008
#define RXDESC_IP_PAYLOAD_ERR   0x00000010
#define RXDESC_IPV4_PACKET  0x00000040
#define RXDESC_IPV6_PACKET  0x00000080
#define TXDESC_UNDERFLOW_ERR    0x00000001
#define TXDESC_JABBER_TIMEOUT   0x00000002
#define TXDESC_LOCAL_FAULT  0x00000004
#define TXDESC_REMOTE_FAULT 0x00000008
#define TXDESC_VLAN_FRAME   0x00000010
#define TXDESC_FRAME_FLUSHED    0x00000020
#define TXDESC_IP_HEADER_ERR    0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
#define TXDESC_ERROR_SUMMARY    0x00008000
#define TXDESC_SA_CTRL_INSERT   0x00040000
#define TXDESC_SA_CTRL_REPLACE  0x00080000
#define TXDESC_2ND_ADDR_CHAINED 0x00100000
#define TXDESC_END_RING     0x00200000
#define TXDESC_CSUM_IP      0x00400000
#define TXDESC_CSUM_IP_PAYLD    0x00800000
#define TXDESC_CSUM_ALL     0x00C00000
#define TXDESC_CRC_EN_REPLACE   0x01000000
#define TXDESC_CRC_EN_APPEND    0x02000000
#define TXDESC_DISABLE_PAD  0x04000000
#define TXDESC_FIRST_SEG    0x10000000
#define TXDESC_LAST_SEG     0x20000000
#define TXDESC_INTERRUPT    0x40000000

#define DESC_OWN        0x80000000
#define DESC_BUFFER1_SZ_MASK    0x00001fff
#define DESC_BUFFER2_SZ_MASK    0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET  16

struct xgmac_dma_desc {
    __le32 flags;
    __le32 buf_size;
    __le32 buf1_addr;       /* Buffer 1 Address Pointer */
    __le32 buf2_addr;       /* Buffer 2 Address Pointer */
    __le32 ext_status;
    __le32 res[3];
};
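
/* Note: this is the eight-word "alternate" descriptor layout (hence the
 * ext_status word and reserved tail); it appears to correspond to the
 * DMA_BUS_MODE_ATDS bit that xgmac_hw_init() sets.
 */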

struct xgmac_extra_stats {
    /* Transmit errors */
    unsigned long tx_jabber;
    unsigned long tx_frame_flushed;
    unsigned long tx_payload_error;
    unsigned long tx_ip_header_error;
    unsigned long tx_local_fault;
    unsigned long tx_remote_fault;
    /* Receive errors */
    unsigned long rx_watchdog;
    unsigned long rx_da_filter_fail;
    unsigned long rx_payload_error;
    unsigned long rx_ip_header_error;
    /* Tx/Rx IRQ errors */
    unsigned long tx_process_stopped;
    unsigned long rx_buf_unav;
    unsigned long rx_process_stopped;
    unsigned long tx_early;
    unsigned long fatal_bus_error;
};

struct xgmac_priv {
    struct xgmac_dma_desc *dma_rx;
    struct sk_buff **rx_skbuff;
    unsigned int rx_tail;
    unsigned int rx_head;

    struct xgmac_dma_desc *dma_tx;
    struct sk_buff **tx_skbuff;
    unsigned int tx_head;
    unsigned int tx_tail;
    int tx_irq_cnt;

    void __iomem *base;
    unsigned int dma_buf_sz;
    dma_addr_t dma_rx_phy;
    dma_addr_t dma_tx_phy;

    struct net_device *dev;
    struct device *device;
    struct napi_struct napi;

    int max_macs;
    struct xgmac_extra_stats xstats;

    spinlock_t stats_lock;
    int pmt_irq;
    char rx_pause;
    char tx_pause;
    int wolopts;
    struct work_struct tx_timeout_work;
};

/* XGMAC Configuration Settings */
#define XGMAC_MAX_MTU       9000
#define PAUSE_TIME      0x400
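/* Per IEEE 802.3, the pause time programmed here is in 512-bit-time
 * quanta, so 0x400 (1024 quanta) requests a pause of 524288 bit times.
 */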

#define DMA_RX_RING_SZ      256
#define DMA_TX_RING_SZ      128
/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH       (DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s)   CIRC_CNT(h, t, s)

#define tx_dma_ring_space(p) \
    dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
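
/* The "& ((s) - 1)" wrap in dma_ring_incr() only works for power-of-two
 * ring sizes; both DMA_RX_RING_SZ (256) and DMA_TX_RING_SZ (128) qualify,
 * matching the assumptions of CIRC_SPACE()/CIRC_CNT().
 */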

/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
    if (buf_sz > MAX_DESC_BUF_SZ)
        p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
            (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
    else
        p->buf_size = cpu_to_le32(buf_sz);
}
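
/* Worked example: for a 9000-byte jumbo buffer, buffer 1 is programmed
 * with MAX_DESC_BUF_SZ (8184) bytes and buffer 2 with the remaining 816
 * bytes in the upper half-word of buf_size.
 */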

static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
    u32 len = le32_to_cpu(p->buf_size);
    return (len & DESC_BUFFER1_SZ_MASK) +
        ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
                     int buf_sz)
{
    struct xgmac_dma_desc *end = p + ring_size - 1;

    memset(p, 0, sizeof(*p) * ring_size);

    for (; p <= end; p++)
        desc_set_buf_len(p, buf_sz);

    end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
    memset(p, 0, sizeof(*p) * ring_size);
    p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
    return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
    /* Clear all fields and set the owner */
    p->flags = cpu_to_le32(DESC_OWN);
}

static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
    u32 tmpflags = le32_to_cpu(p->flags);
    tmpflags &= TXDESC_END_RING;
    tmpflags |= flags | DESC_OWN;
    p->flags = cpu_to_le32(tmpflags);
}

static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
{
    u32 tmpflags = le32_to_cpu(p->flags);
    tmpflags &= TXDESC_END_RING;
    p->flags = cpu_to_le32(tmpflags);
}
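
/* Both owner helpers above mask the old flags with TXDESC_END_RING before
 * rewriting them, so the ring-wrap marker planted by desc_init_tx_desc()
 * survives every ownership hand-off.
 */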

static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
    return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}

static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
{
    return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
}

static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
    return le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
                     u32 paddr, int len)
{
    p->buf1_addr = cpu_to_le32(paddr);
    if (len > MAX_DESC_BUF_SZ)
        p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
                          u32 paddr, int len)
{
    desc_set_buf_len(p, len);
    desc_set_buf_addr(p, paddr, len);
}

static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
    u32 data = le32_to_cpu(p->flags);
    u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
    if (data & RXDESC_FRAME_TYPE)
        len -= ETH_FCS_LEN;

    return len;
}

static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
    int timeout = 1000;
    u32 reg = readl(ioaddr + XGMAC_OMR);
    writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

    while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
        udelay(1);
}
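
/* The flush above is a bounded busy-wait: XGMAC_OMR_FTF self-clears when
 * the FIFO flush finishes, and the loop gives up after roughly 1000us
 * instead of spinning forever on unresponsive hardware.
 */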

static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
    struct xgmac_extra_stats *x = &priv->xstats;
    u32 status = le32_to_cpu(p->flags);

    if (!(status & TXDESC_ERROR_SUMMARY))
        return 0;

    netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
    if (status & TXDESC_JABBER_TIMEOUT)
        x->tx_jabber++;
    if (status & TXDESC_FRAME_FLUSHED)
        x->tx_frame_flushed++;
    if (status & TXDESC_UNDERFLOW_ERR)
        xgmac_dma_flush_tx_fifo(priv->base);
    if (status & TXDESC_IP_HEADER_ERR)
        x->tx_ip_header_error++;
    if (status & TXDESC_LOCAL_FAULT)
        x->tx_local_fault++;
    if (status & TXDESC_REMOTE_FAULT)
        x->tx_remote_fault++;
    if (status & TXDESC_PAYLOAD_CSUM_ERR)
        x->tx_payload_error++;

    return -1;
}

static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
    struct xgmac_extra_stats *x = &priv->xstats;
    int ret = CHECKSUM_UNNECESSARY;
    u32 status = le32_to_cpu(p->flags);
    u32 ext_status = le32_to_cpu(p->ext_status);

    if (status & RXDESC_DA_FILTER_FAIL) {
        netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
        x->rx_da_filter_fail++;
        return -1;
    }

    /* All frames should fit into a single buffer */
    if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
        return -1;

    /* Check if packet has checksum already */
    if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
        !(ext_status & RXDESC_IP_PAYLOAD_MASK))
        ret = CHECKSUM_NONE;

    netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
           (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

    if (!(status & RXDESC_ERROR_SUMMARY))
        return ret;

    /* Handle any errors */
    if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
        RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
        return -1;

    if (status & RXDESC_EXT_STATUS) {
        if (ext_status & RXDESC_IP_HEADER_ERR)
            x->rx_ip_header_error++;
        if (ext_status & RXDESC_IP_PAYLOAD_ERR)
            x->rx_payload_error++;
        netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
               ext_status);
        return CHECKSUM_NONE;
    }

    return ret;
}

static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
    u32 value = readl(ioaddr + XGMAC_CONTROL);
    value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
    writel(value, ioaddr + XGMAC_CONTROL);

    value = readl(ioaddr + XGMAC_DMA_CONTROL);
    value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
    writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
    u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
    value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
    writel(value, ioaddr + XGMAC_DMA_CONTROL);

    value = readl(ioaddr + XGMAC_CONTROL);
    value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
    writel(value, ioaddr + XGMAC_CONTROL);
}

static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr,
                   int num)
{
    u32 data;

    if (addr) {
        data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
        writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
        data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
        writel(data, ioaddr + XGMAC_ADDR_LOW(num));
    } else {
        writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
        writel(0, ioaddr + XGMAC_ADDR_LOW(num));
    }
}
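
/* Slots other than 0 are written with XGMAC_ADDR_AE in the high word,
 * presumably the "address enable" bit for the additional perfect-filter
 * entries; writing zeros to both words disables a slot.
 */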

static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                   int num)
{
    u32 hi_addr, lo_addr;

    /* Read the MAC address from the hardware */
    hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
    lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

    /* Extract the MAC address from the high and low words */
    addr[0] = lo_addr & 0xff;
    addr[1] = (lo_addr >> 8) & 0xff;
    addr[2] = (lo_addr >> 16) & 0xff;
    addr[3] = (lo_addr >> 24) & 0xff;
    addr[4] = hi_addr & 0xff;
    addr[5] = (hi_addr >> 8) & 0xff;
}

static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
    u32 reg;
    unsigned int flow = 0;

    priv->rx_pause = rx;
    priv->tx_pause = tx;

    if (rx || tx) {
        if (rx)
            flow |= XGMAC_FLOW_CTRL_RFE;
        if (tx)
            flow |= XGMAC_FLOW_CTRL_TFE;

        flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
        flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

        writel(flow, priv->base + XGMAC_FLOW_CTRL);

        reg = readl(priv->base + XGMAC_OMR);
        reg |= XGMAC_OMR_EFC;
        writel(reg, priv->base + XGMAC_OMR);
    } else {
        writel(0, priv->base + XGMAC_FLOW_CTRL);

        reg = readl(priv->base + XGMAC_OMR);
        reg &= ~XGMAC_OMR_EFC;
        writel(reg, priv->base + XGMAC_OMR);
    }

    return 0;
}

static void xgmac_rx_refill(struct xgmac_priv *priv)
{
    struct xgmac_dma_desc *p;
    dma_addr_t paddr;
    int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;

    while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
        int entry = priv->rx_head;
        struct sk_buff *skb;

        p = priv->dma_rx + entry;

        if (priv->rx_skbuff[entry] == NULL) {
            skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
            if (unlikely(skb == NULL))
                break;

            paddr = dma_map_single(priv->device, skb->data,
                           priv->dma_buf_sz - NET_IP_ALIGN,
                           DMA_FROM_DEVICE);
            if (dma_mapping_error(priv->device, paddr)) {
                dev_kfree_skb_any(skb);
                break;
            }
            priv->rx_skbuff[entry] = skb;
            desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
        }

        netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
            priv->rx_head, priv->rx_tail);

        priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
        desc_set_rx_owner(p);
    }
}
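
/* Note the "> 1" above: beyond the slot CIRC_SPACE() already keeps free,
 * refill leaves one extra descriptor without DMA ownership, apparently as
 * a guard so the hardware never fully catches up to the software tail.
 */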

/**
 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description:  this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
static int xgmac_dma_desc_rings_init(struct net_device *dev)
{
    struct xgmac_priv *priv = netdev_priv(dev);
    unsigned int bfsize;

    /* Set the Buffer size according to the MTU;
     * The total buffer size including any IP offset must be a multiple
     * of 8 bytes.
     */
    bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

    netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);

    priv->rx_skbuff = kcalloc(DMA_RX_RING_SZ, sizeof(struct sk_buff *),
                  GFP_KERNEL);
    if (!priv->rx_skbuff)
        return -ENOMEM;

    priv->dma_rx = dma_alloc_coherent(priv->device,
                      DMA_RX_RING_SZ *
                      sizeof(struct xgmac_dma_desc),
                      &priv->dma_rx_phy,
                      GFP_KERNEL);
    if (!priv->dma_rx)
        goto err_dma_rx;

    priv->tx_skbuff = kcalloc(DMA_TX_RING_SZ, sizeof(struct sk_buff *),
                  GFP_KERNEL);
    if (!priv->tx_skbuff)
        goto err_tx_skb;

    priv->dma_tx = dma_alloc_coherent(priv->device,
                      DMA_TX_RING_SZ *
                      sizeof(struct xgmac_dma_desc),
                      &priv->dma_tx_phy,
                      GFP_KERNEL);
    if (!priv->dma_tx)
        goto err_dma_tx;

    netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
        "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
        priv->dma_rx, priv->dma_tx,
        (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

    priv->rx_tail = 0;
    priv->rx_head = 0;
    priv->dma_buf_sz = bfsize;
    desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
    xgmac_rx_refill(priv);

    priv->tx_tail = 0;
    priv->tx_head = 0;
    desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);

    writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
    writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);

    return 0;

err_dma_tx:
    kfree(priv->tx_skbuff);
err_tx_skb:
    dma_free_coherent(priv->device,
              DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
              priv->dma_rx, priv->dma_rx_phy);
err_dma_rx:
    kfree(priv->rx_skbuff);
    return -ENOMEM;
}

static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
{
    int i;
    struct xgmac_dma_desc *p;

    if (!priv->rx_skbuff)
        return;

    for (i = 0; i < DMA_RX_RING_SZ; i++) {
        struct sk_buff *skb = priv->rx_skbuff[i];
        if (skb == NULL)
            continue;

        p = priv->dma_rx + i;
        dma_unmap_single(priv->device, desc_get_buf_addr(p),
                 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);
        priv->rx_skbuff[i] = NULL;
    }
}

static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
    int i;
    struct xgmac_dma_desc *p;

    if (!priv->tx_skbuff)
        return;

    for (i = 0; i < DMA_TX_RING_SZ; i++) {
        if (priv->tx_skbuff[i] == NULL)
            continue;

        p = priv->dma_tx + i;
        if (desc_get_tx_fs(p))
            dma_unmap_single(priv->device, desc_get_buf_addr(p),
                     desc_get_buf_len(p), DMA_TO_DEVICE);
        else
            dma_unmap_page(priv->device, desc_get_buf_addr(p),
                       desc_get_buf_len(p), DMA_TO_DEVICE);

        if (desc_get_tx_ls(p))
            dev_kfree_skb_any(priv->tx_skbuff[i]);
        priv->tx_skbuff[i] = NULL;
    }
}

static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
{
    /* Release the DMA TX/RX socket buffers */
    xgmac_free_rx_skbufs(priv);
    xgmac_free_tx_skbufs(priv);

    /* Free the consistent memory allocated for descriptor rings */
    if (priv->dma_tx) {
        dma_free_coherent(priv->device,
                  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
                  priv->dma_tx, priv->dma_tx_phy);
        priv->dma_tx = NULL;
    }
    if (priv->dma_rx) {
        dma_free_coherent(priv->device,
                  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
                  priv->dma_rx, priv->dma_rx_phy);
        priv->dma_rx = NULL;
    }
    kfree(priv->rx_skbuff);
    priv->rx_skbuff = NULL;
    kfree(priv->tx_skbuff);
    priv->tx_skbuff = NULL;
}

/**
 * xgmac_tx_complete:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
    while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
        unsigned int entry = priv->tx_tail;
        struct sk_buff *skb = priv->tx_skbuff[entry];
        struct xgmac_dma_desc *p = priv->dma_tx + entry;

        /* Check if the descriptor is owned by the DMA. */
        if (desc_get_owner(p))
            break;

        netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
            priv->tx_head, priv->tx_tail);

        if (desc_get_tx_fs(p))
            dma_unmap_single(priv->device, desc_get_buf_addr(p),
                     desc_get_buf_len(p), DMA_TO_DEVICE);
        else
            dma_unmap_page(priv->device, desc_get_buf_addr(p),
                       desc_get_buf_len(p), DMA_TO_DEVICE);

        /* Check tx error on the last segment */
        if (desc_get_tx_ls(p)) {
            desc_get_tx_status(priv, p);
            dev_consume_skb_any(skb);
        }

        priv->tx_skbuff[entry] = NULL;
        priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
    }

    /* Ensure tx_tail is visible to xgmac_xmit */
    smp_mb();
    if (unlikely(netif_queue_stopped(priv->dev) &&
        (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
        netif_wake_queue(priv->dev);
}

static void xgmac_tx_timeout_work(struct work_struct *work)
{
    u32 reg, value;
    struct xgmac_priv *priv =
        container_of(work, struct xgmac_priv, tx_timeout_work);

    napi_disable(&priv->napi);

    writel(0, priv->base + XGMAC_DMA_INTR_ENA);

    netif_tx_lock(priv->dev);

    reg = readl(priv->base + XGMAC_DMA_CONTROL);
    writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
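    /* Wait for the transmit process state (the DMA_STATUS_TS_MASK field,
     * bits 22:20, hence the 0x700000 below) to become stopped (0) or,
     * presumably, suspended (0x600000, i.e. state 6).
     */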
    do {
        value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
    } while (value && (value != 0x600000));

    xgmac_free_tx_skbufs(priv);
    desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
    priv->tx_tail = 0;
    priv->tx_head = 0;
    writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
    writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

    writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
        priv->base + XGMAC_DMA_STATUS);

    netif_tx_unlock(priv->dev);
    netif_wake_queue(priv->dev);

    napi_enable(&priv->napi);

    /* Enable interrupts */
    writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
    writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}

static int xgmac_hw_init(struct net_device *dev)
{
    u32 value, ctrl;
    int limit;
    struct xgmac_priv *priv = netdev_priv(dev);
    void __iomem *ioaddr = priv->base;

    /* Save the ctrl register value */
    ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

    /* SW reset */
    value = DMA_BUS_MODE_SFT_RESET;
    writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
    limit = 15000;
    while (limit-- &&
        (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
        cpu_relax();
    if (limit < 0)
        return -EBUSY;

    value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
        (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
        DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
    writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

    writel(0, ioaddr + XGMAC_DMA_INTR_ENA);

    /* Mask power mgt interrupt */
    writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);

    /* XGMAC requires AXI bus init. This is a 'magic number' for now */
    writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);

    ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
        XGMAC_CONTROL_CAR;
    if (dev->features & NETIF_F_RXCSUM)
        ctrl |= XGMAC_CONTROL_IPC;
    writel(ctrl, ioaddr + XGMAC_CONTROL);

    writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);

    /* Set the HW DMA mode and the COE */
    writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
        XGMAC_OMR_RTC_256,
        ioaddr + XGMAC_OMR);

    /* Reset the MMC counters */
    writel(1, ioaddr + XGMAC_MMC_CTRL);
    return 0;
}

/**
 *  xgmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_open(struct net_device *dev)
{
    int ret;
    struct xgmac_priv *priv = netdev_priv(dev);
    void __iomem *ioaddr = priv->base;

    /* Check that the MAC address is valid. If it's not, fall back to
     * a randomly generated one. The user may still assign an address
     * with the usual command:
     *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
    if (!is_valid_ether_addr(dev->dev_addr)) {
        eth_hw_addr_random(dev);
        netdev_dbg(priv->dev, "generated random MAC address %pM\n",
            dev->dev_addr);
    }

    memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

    /* Initialize the XGMAC and descriptors */
    xgmac_hw_init(dev);
    xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
    xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

    ret = xgmac_dma_desc_rings_init(dev);
    if (ret < 0)
        return ret;

    /* Enable the MAC Rx/Tx */
    xgmac_mac_enable(ioaddr);

    napi_enable(&priv->napi);
    netif_start_queue(dev);

    /* Enable interrupts */
    writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
    writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

    return 0;
}

/**
 *  xgmac_stop - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int xgmac_stop(struct net_device *dev)
{
    struct xgmac_priv *priv = netdev_priv(dev);

    if (readl(priv->base + XGMAC_DMA_INTR_ENA))
        napi_disable(&priv->napi);

    writel(0, priv->base + XGMAC_DMA_INTR_ENA);

    netif_tx_disable(dev);

    /* Disable the MAC core */
    xgmac_mac_disable(priv->base);

    /* Release and free the Rx/Tx resources */
    xgmac_free_dma_desc_rings(priv);

    return 0;
}

/**
 *  xgmac_xmit:
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : Tx entry point of the driver.
 */
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct xgmac_priv *priv = netdev_priv(dev);
    unsigned int entry;
    int i;
    u32 irq_flag;
    int nfrags = skb_shinfo(skb)->nr_frags;
    struct xgmac_dma_desc *desc, *first;
    unsigned int desc_flags;
    unsigned int len;
    dma_addr_t paddr;

    priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
    irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;

    desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
        TXDESC_CSUM_ALL : 0;
    entry = priv->tx_head;
    desc = priv->dma_tx + entry;
    first = desc;

    len = skb_headlen(skb);
    paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
    if (dma_mapping_error(priv->device, paddr)) {
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
    }
    priv->tx_skbuff[entry] = skb;
    desc_set_buf_addr_and_size(desc, paddr, len);

    for (i = 0; i < nfrags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        len = skb_frag_size(frag);

        paddr = skb_frag_dma_map(priv->device, frag, 0, len,
                     DMA_TO_DEVICE);
        if (dma_mapping_error(priv->device, paddr))
            goto dma_err;

        entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
        desc = priv->dma_tx + entry;
        priv->tx_skbuff[entry] = skb;

        desc_set_buf_addr_and_size(desc, paddr, len);
        if (i < (nfrags - 1))
            desc_set_tx_owner(desc, desc_flags);
    }

    /* Interrupt on completion only for the last segment */
    if (desc != first)
        desc_set_tx_owner(desc, desc_flags |
            TXDESC_LAST_SEG | irq_flag);
    else
        desc_flags |= TXDESC_LAST_SEG | irq_flag;

    /* Set owner on first desc last to avoid race condition */
    wmb();
    desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

    writel(1, priv->base + XGMAC_DMA_TX_POLL);

    priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

    /* Ensure tx_head update is visible to tx completion */
    smp_mb();
    if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
        netif_stop_queue(dev);
        /* Ensure netif_stop_queue is visible to tx completion */
        smp_mb();
        if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
            netif_start_queue(dev);
    }
    return NETDEV_TX_OK;

dma_err:
    entry = priv->tx_head;
    for ( ; i > 0; i--) {
        entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
        desc = priv->dma_tx + entry;
        priv->tx_skbuff[entry] = NULL;
        dma_unmap_page(priv->device, desc_get_buf_addr(desc),
                   desc_get_buf_len(desc), DMA_TO_DEVICE);
        desc_clear_tx_owner(desc);
    }
    desc = first;
    dma_unmap_single(priv->device, desc_get_buf_addr(desc),
             desc_get_buf_len(desc), DMA_TO_DEVICE);
    dev_kfree_skb_any(skb);
    return NETDEV_TX_OK;
}

static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
    unsigned int entry;
    unsigned int count = 0;
    struct xgmac_dma_desc *p;

    while (count < limit) {
        int ip_checksum;
        struct sk_buff *skb;
        int frame_len;

        if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
            break;

        entry = priv->rx_tail;
        p = priv->dma_rx + entry;
        if (desc_get_owner(p))
            break;

        count++;
        priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);

        /* read the status of the incoming frame */
        ip_checksum = desc_get_rx_status(priv, p);
        if (ip_checksum < 0)
            continue;

        skb = priv->rx_skbuff[entry];
        if (unlikely(!skb)) {
            netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
            break;
        }
        priv->rx_skbuff[entry] = NULL;

        frame_len = desc_get_rx_frame_len(p);
        netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
            frame_len, ip_checksum);

        skb_put(skb, frame_len);
        dma_unmap_single(priv->device, desc_get_buf_addr(p),
                 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);

        skb->protocol = eth_type_trans(skb, priv->dev);
        skb->ip_summed = ip_checksum;
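        /* Frames without a hardware-verified checksum bypass GRO,
         * presumably because aggregation is of little value when the
         * checksum still has to be verified in software.
         */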
        if (ip_checksum == CHECKSUM_NONE)
            netif_receive_skb(skb);
        else
            napi_gro_receive(&priv->napi, skb);
    }

    xgmac_rx_refill(priv);

    return count;
}

/**
 *  xgmac_poll - xgmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *        all interfaces.
 *  Description :
 *   This function implements the reception process.
 *   Also it runs the TX completion thread
 */
static int xgmac_poll(struct napi_struct *napi, int budget)
{
    struct xgmac_priv *priv = container_of(napi,
                       struct xgmac_priv, napi);
    int work_done = 0;

    xgmac_tx_complete(priv);
    work_done = xgmac_rx(priv, budget);

    if (work_done < budget) {
        napi_complete_done(napi, work_done);
        __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
    }
    return work_done;
}

/**
 *  xgmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: index of the hung transmit queue
 *
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void xgmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
    struct xgmac_priv *priv = netdev_priv(dev);
    schedule_work(&priv->tx_timeout_work);
}

/**
 *  xgmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void xgmac_set_rx_mode(struct net_device *dev)
{
    int i;
    struct xgmac_priv *priv = netdev_priv(dev);
    void __iomem *ioaddr = priv->base;
    unsigned int value = 0;
    u32 hash_filter[XGMAC_NUM_HASH];
    int reg = 1;
    struct netdev_hw_addr *ha;
    bool use_hash = false;

    netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
         netdev_mc_count(dev), netdev_uc_count(dev));

    if (dev->flags & IFF_PROMISC)
        value |= XGMAC_FRAME_FILTER_PR;

    memset(hash_filter, 0, sizeof(hash_filter));

    if (netdev_uc_count(dev) > priv->max_macs) {
        use_hash = true;
        value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
    }
    netdev_for_each_uc_addr(ha, dev) {
        if (use_hash) {
            u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

            /* The most significant 4 bits determine the register to
             * use (H/L) while the other 5 bits determine the bit
             * within the register. */
            hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
        } else {
            xgmac_set_mac_addr(ioaddr, ha->addr, reg);
            reg++;
        }
    }

    if (dev->flags & IFF_ALLMULTI) {
        value |= XGMAC_FRAME_FILTER_PM;
        goto out;
    }

    if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
        use_hash = true;
        value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
    } else {
        use_hash = false;
    }
    netdev_for_each_mc_addr(ha, dev) {
        if (use_hash) {
            u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

            /* The most significant 4 bits determine the register to
             * use (H/L) while the other 5 bits determine the bit
             * within the register. */
            hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
        } else {
            xgmac_set_mac_addr(ioaddr, ha->addr, reg);
            reg++;
        }
    }

out:
    for (i = reg; i <= priv->max_macs; i++)
        xgmac_set_mac_addr(ioaddr, NULL, i);
    for (i = 0; i < XGMAC_NUM_HASH; i++)
        writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

    writel(value, ioaddr + XGMAC_FRAME_FILTER);
}

/**
 *  xgmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
    /* Stop everything, get ready to change the MTU */
    if (!netif_running(dev))
        return 0;

    /* Bring interface down, change mtu and bring interface back up */
    xgmac_stop(dev);
    dev->mtu = new_mtu;
    return xgmac_open(dev);
}
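
/* A full stop/open cycle is needed here because the RX buffer size and
 * both descriptor rings are rebuilt from the MTU in
 * xgmac_dma_desc_rings_init().
 */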

static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
{
    u32 intr_status;
    struct net_device *dev = (struct net_device *)dev_id;
    struct xgmac_priv *priv = netdev_priv(dev);
    void __iomem *ioaddr = priv->base;

    intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
    if (intr_status & XGMAC_INT_STAT_PMT) {
        netdev_dbg(priv->dev, "received Magic frame\n");
        /* clear the PMT bits 5 and 6 by reading the PMT */
        readl(ioaddr + XGMAC_PMT);
    }
    return IRQ_HANDLED;
}

static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
    u32 intr_status;
    struct net_device *dev = (struct net_device *)dev_id;
    struct xgmac_priv *priv = netdev_priv(dev);
    struct xgmac_extra_stats *x = &priv->xstats;

    /* read the status register (CSR5) */
    intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
    intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
    __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);

    /* It displays the DMA process states (CSR5 register) */
    /* ABNORMAL interrupts */
    if (unlikely(intr_status & DMA_STATUS_AIS)) {
        if (intr_status & DMA_STATUS_TJT) {
            netdev_err(priv->dev, "transmit jabber\n");
            x->tx_jabber++;
        }
        if (intr_status & DMA_STATUS_RU)
            x->rx_buf_unav++;
        if (intr_status & DMA_STATUS_RPS) {
            netdev_err(priv->dev, "receive process stopped\n");
            x->rx_process_stopped++;
        }
        if (intr_status & DMA_STATUS_ETI) {
            netdev_err(priv->dev, "transmit early interrupt\n");
            x->tx_early++;
        }
        if (intr_status & DMA_STATUS_TPS) {
            netdev_err(priv->dev, "transmit process stopped\n");
            x->tx_process_stopped++;
            schedule_work(&priv->tx_timeout_work);
        }
        if (intr_status & DMA_STATUS_FBI) {
            netdev_err(priv->dev, "fatal bus error\n");
            x->fatal_bus_error++;
        }
    }

    /* TX/RX NORMAL interrupts */
    if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
        __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
        napi_schedule(&priv->napi);
    }

    return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled. */
static void xgmac_poll_controller(struct net_device *dev)
{
    disable_irq(dev->irq);
    xgmac_interrupt(dev->irq, dev);
    enable_irq(dev->irq);
}
#endif

static void
xgmac_get_stats64(struct net_device *dev,
          struct rtnl_link_stats64 *storage)
{
    struct xgmac_priv *priv = netdev_priv(dev);
    void __iomem *base = priv->base;
    u32 count;

    spin_lock_bh(&priv->stats_lock);
    writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);

    storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
    storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;

    storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
    storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
    storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
    storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
    storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);

    storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
    storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;

    count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
    storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
    storage->tx_packets = count;
    storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);

    writel(0, base + XGMAC_MMC_CTRL);
    spin_unlock_bh(&priv->stats_lock);
}

static int xgmac_set_mac_address(struct net_device *dev, void *p)
{
    struct xgmac_priv *priv = netdev_priv(dev);
    void __iomem *ioaddr = priv->base;
    struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    eth_hw_addr_set(dev, addr->sa_data);

    xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);

    return 0;
}

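/* Only RX checksum offload is handled here: toggling NETIF_F_RXCSUM
 * flips the IPC bit in the MAC control register.
 */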
static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
{
    u32 ctrl;
    struct xgmac_priv *priv = netdev_priv(dev);
    void __iomem *ioaddr = priv->base;
    netdev_features_t changed = dev->features ^ features;

    if (!(changed & NETIF_F_RXCSUM))
        return 0;

    ctrl = readl(ioaddr + XGMAC_CONTROL);
    if (features & NETIF_F_RXCSUM)
        ctrl |= XGMAC_CONTROL_IPC;
    else
        ctrl &= ~XGMAC_CONTROL_IPC;
    writel(ctrl, ioaddr + XGMAC_CONTROL);

    return 0;
}

static const struct net_device_ops xgmac_netdev_ops = {
    .ndo_open = xgmac_open,
    .ndo_start_xmit = xgmac_xmit,
    .ndo_stop = xgmac_stop,
    .ndo_change_mtu = xgmac_change_mtu,
    .ndo_set_rx_mode = xgmac_set_rx_mode,
    .ndo_tx_timeout = xgmac_tx_timeout,
    .ndo_get_stats64 = xgmac_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = xgmac_poll_controller,
#endif
    .ndo_set_mac_address = xgmac_set_mac_address,
    .ndo_set_features = xgmac_set_features,
};

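/* The XGMAC link is a fixed 10 Gb/s full-duplex attachment, so no link
 * modes are advertised and autonegotiation is reported as disabled.
 */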
static int xgmac_ethtool_get_link_ksettings(struct net_device *dev,
                        struct ethtool_link_ksettings *cmd)
{
    cmd->base.autoneg = AUTONEG_DISABLE;
    cmd->base.duplex = DUPLEX_FULL;
    cmd->base.speed = SPEED_10000;
    ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 0);
    ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 0);
    return 0;
}

static void xgmac_get_pauseparam(struct net_device *netdev,
                      struct ethtool_pauseparam *pause)
{
    struct xgmac_priv *priv = netdev_priv(netdev);

    pause->rx_pause = priv->rx_pause;
    pause->tx_pause = priv->tx_pause;
}

static int xgmac_set_pauseparam(struct net_device *netdev,
                     struct ethtool_pauseparam *pause)
{
    struct xgmac_priv *priv = netdev_priv(netdev);

    if (pause->autoneg)
        return -EINVAL;

    return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
}

struct xgmac_stats {
    char stat_string[ETH_GSTRING_LEN];
    int stat_offset;
    bool is_reg;
};

#define XGMAC_STAT(m)   \
    { #m, offsetof(struct xgmac_priv, xstats.m), false }
#define XGMAC_HW_STAT(m, reg_offset)    \
    { #m, reg_offset, true }
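/* For example, XGMAC_STAT(tx_jabber) expands to
 * { "tx_jabber", offsetof(struct xgmac_priv, xstats.tx_jabber), false }
 * and is read from the software counters, while an XGMAC_HW_STAT entry
 * stores an MMC register offset and is read from the hardware directly.
 */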

static const struct xgmac_stats xgmac_gstrings_stats[] = {
    XGMAC_STAT(tx_frame_flushed),
    XGMAC_STAT(tx_payload_error),
    XGMAC_STAT(tx_ip_header_error),
    XGMAC_STAT(tx_local_fault),
    XGMAC_STAT(tx_remote_fault),
    XGMAC_STAT(tx_early),
    XGMAC_STAT(tx_process_stopped),
    XGMAC_STAT(tx_jabber),
    XGMAC_STAT(rx_buf_unav),
    XGMAC_STAT(rx_process_stopped),
    XGMAC_STAT(rx_payload_error),
    XGMAC_STAT(rx_ip_header_error),
    XGMAC_STAT(rx_da_filter_fail),
    XGMAC_STAT(fatal_bus_error),
    XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
    XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
    XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
    XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
    XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
};
#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)

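/* Copy out one u64 per entry: hardware stats are read live from the MMC
 * registers, software stats from the xstats counters in the private
 * data.
 */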
static void xgmac_get_ethtool_stats(struct net_device *dev,
                     struct ethtool_stats *dummy,
                     u64 *data)
{
    struct xgmac_priv *priv = netdev_priv(dev);
    void *p = priv;
    int i;

    for (i = 0; i < XGMAC_STATS_LEN; i++) {
        if (xgmac_gstrings_stats[i].is_reg)
            *data++ = readl(priv->base +
                xgmac_gstrings_stats[i].stat_offset);
        else
            *data++ = *(u32 *)(p +
                xgmac_gstrings_stats[i].stat_offset);
    }
}

static int xgmac_get_sset_count(struct net_device *netdev, int sset)
{
    switch (sset) {
    case ETH_SS_STATS:
        return XGMAC_STATS_LEN;
    default:
        return -EINVAL;
    }
}

static void xgmac_get_strings(struct net_device *dev, u32 stringset,
                   u8 *data)
{
    int i;
    u8 *p = data;

    switch (stringset) {
    case ETH_SS_STATS:
        for (i = 0; i < XGMAC_STATS_LEN; i++) {
            memcpy(p, xgmac_gstrings_stats[i].stat_string,
                   ETH_GSTRING_LEN);
            p += ETH_GSTRING_LEN;
        }
        break;
    default:
        WARN_ON(1);
        break;
    }
}

static void xgmac_get_wol(struct net_device *dev,
                   struct ethtool_wolinfo *wol)
{
    struct xgmac_priv *priv = netdev_priv(dev);

    if (device_can_wakeup(priv->device)) {
        wol->supported = WAKE_MAGIC | WAKE_UCAST;
        wol->wolopts = priv->wolopts;
    }
}

static int xgmac_set_wol(struct net_device *dev,
                  struct ethtool_wolinfo *wol)
{
    struct xgmac_priv *priv = netdev_priv(dev);
    u32 support = WAKE_MAGIC | WAKE_UCAST;

    if (!device_can_wakeup(priv->device))
        return -EOPNOTSUPP;

    if (wol->wolopts & ~support)
        return -EINVAL;

    priv->wolopts = wol->wolopts;

    if (wol->wolopts) {
        device_set_wakeup_enable(priv->device, 1);
        enable_irq_wake(dev->irq);
    } else {
        device_set_wakeup_enable(priv->device, 0);
        disable_irq_wake(dev->irq);
    }

    return 0;
}

static const struct ethtool_ops xgmac_ethtool_ops = {
    .get_link = ethtool_op_get_link,
    .get_pauseparam = xgmac_get_pauseparam,
    .set_pauseparam = xgmac_set_pauseparam,
    .get_ethtool_stats = xgmac_get_ethtool_stats,
    .get_strings = xgmac_get_strings,
    .get_wol = xgmac_get_wol,
    .set_wol = xgmac_set_wol,
    .get_sset_count = xgmac_get_sset_count,
    .get_link_ksettings = xgmac_ethtool_get_link_ksettings,
};

/**
 * xgmac_probe
 * @pdev: platform device pointer
 * Description: the driver is initialized through platform_device.
 */
static int xgmac_probe(struct platform_device *pdev)
{
    int ret = 0;
    struct resource *res;
    struct net_device *ndev = NULL;
    struct xgmac_priv *priv = NULL;
    u8 addr[ETH_ALEN];
    u32 uid;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -ENODEV;

    if (!request_mem_region(res->start, resource_size(res), pdev->name))
        return -EBUSY;

    ndev = alloc_etherdev(sizeof(struct xgmac_priv));
    if (!ndev) {
        ret = -ENOMEM;
        goto err_alloc;
    }

    SET_NETDEV_DEV(ndev, &pdev->dev);
    priv = netdev_priv(ndev);
    platform_set_drvdata(pdev, ndev);
    ndev->netdev_ops = &xgmac_netdev_ops;
    ndev->ethtool_ops = &xgmac_ethtool_ops;
    spin_lock_init(&priv->stats_lock);
    INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);

    priv->device = &pdev->dev;
    priv->dev = ndev;
    priv->rx_pause = 1;
    priv->tx_pause = 1;

    priv->base = ioremap(res->start, resource_size(res));
    if (!priv->base) {
        netdev_err(ndev, "ioremap failed\n");
        ret = -ENOMEM;
        goto err_io;
    }

    uid = readl(priv->base + XGMAC_VERSION);
    netdev_info(ndev, "h/w version is 0x%x\n", uid);

    /* Figure out how many valid mac address filter registers we have:
     * write to the last possible slot and see if the value sticks.
     */
    writel(1, priv->base + XGMAC_ADDR_HIGH(31));
    if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
        priv->max_macs = 31;
    else
        priv->max_macs = 7;

    writel(0, priv->base + XGMAC_DMA_INTR_ENA);
    ndev->irq = platform_get_irq(pdev, 0);
    if (ndev->irq < 0) {
        netdev_err(ndev, "No irq resource\n");
        ret = ndev->irq;
        goto err_irq;
    }

    ret = request_irq(ndev->irq, xgmac_interrupt, 0,
              dev_name(&pdev->dev), ndev);
    if (ret < 0) {
        netdev_err(ndev, "Could not request irq %d, ret %d\n",
            ndev->irq, ret);
        goto err_irq;
    }

    priv->pmt_irq = platform_get_irq(pdev, 1);
    if (priv->pmt_irq < 0) {
        netdev_err(ndev, "No pmt irq resource\n");
        ret = priv->pmt_irq;
        goto err_pmt_irq;
    }

    ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
              dev_name(&pdev->dev), ndev);
    if (ret < 0) {
        netdev_err(ndev, "Could not request irq %d, ret %d\n",
            priv->pmt_irq, ret);
        goto err_pmt_irq;
    }

    device_set_wakeup_capable(&pdev->dev, 1);
    if (device_can_wakeup(priv->device))
        priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */

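    /* Scatter-gather and high DMA always work; checksum offload is only
     * advertised when the DMA hardware-feature register reports checksum
     * support.
     */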
    ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
    if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
        ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                     NETIF_F_RXCSUM;
    ndev->features |= ndev->hw_features;
    ndev->priv_flags |= IFF_UNICAST_FLT;

    /* MTU range: 46 - 9000 */
    ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
    ndev->max_mtu = XGMAC_MAX_MTU;

    /* Get the MAC address */
    xgmac_get_mac_addr(priv->base, addr, 0);
    eth_hw_addr_set(ndev, addr);
    if (!is_valid_ether_addr(ndev->dev_addr))
        netdev_warn(ndev, "MAC address %pM not valid\n",
             ndev->dev_addr);

    netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
    ret = register_netdev(ndev);
    if (ret)
        goto err_reg;

    return 0;

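    /* Error unwind: undo the probe steps in reverse order */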
err_reg:
    netif_napi_del(&priv->napi);
    free_irq(priv->pmt_irq, ndev);
err_pmt_irq:
    free_irq(ndev->irq, ndev);
err_irq:
    iounmap(priv->base);
err_io:
    free_netdev(ndev);
err_alloc:
    release_mem_region(res->start, resource_size(res));
    return ret;
}

/**
 * xgmac_remove
 * @pdev: platform device pointer
 * Description: this function disables the MAC core, frees the IRQ lines,
 * unregisters the network device and releases the remapped I/O memory
 * and the reserved memory region.
 */
static int xgmac_remove(struct platform_device *pdev)
{
    struct net_device *ndev = platform_get_drvdata(pdev);
    struct xgmac_priv *priv = netdev_priv(ndev);
    struct resource *res;

    xgmac_mac_disable(priv->base);

    /* Free the IRQ lines */
    free_irq(ndev->irq, ndev);
    free_irq(priv->pmt_irq, ndev);

    unregister_netdev(ndev);
    netif_napi_del(&priv->napi);

    iounmap(priv->base);
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    release_mem_region(res->start, resource_size(res));

    free_netdev(ndev);

    return 0;
}

#ifdef CONFIG_PM_SLEEP
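/* Translate the WAKE_* flags into the PMT register layout; either wake
 * source also sets the power-down bit, and resume clears the register
 * again by writing 0.
 */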
static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
    unsigned int pmt = 0;

    if (mode & WAKE_MAGIC)
        pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
    if (mode & WAKE_UCAST)
        pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

    writel(pmt, ioaddr + XGMAC_PMT);
}

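/* Quiesce NAPI and DMA interrupts, then either arm the PMT wake-up
 * logic with the DMA engines stopped (Wake-on-LAN) or disable the MAC
 * entirely.
 */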
static int xgmac_suspend(struct device *dev)
{
    struct net_device *ndev = dev_get_drvdata(dev);
    struct xgmac_priv *priv = netdev_priv(ndev);
    u32 value;

    if (!ndev || !netif_running(ndev))
        return 0;

    netif_device_detach(ndev);
    napi_disable(&priv->napi);
    writel(0, priv->base + XGMAC_DMA_INTR_ENA);

    if (device_may_wakeup(priv->device)) {
        /* Stop TX/RX DMA Only */
        value = readl(priv->base + XGMAC_DMA_CONTROL);
        value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
        writel(value, priv->base + XGMAC_DMA_CONTROL);

        xgmac_pmt(priv->base, priv->wolopts);
    } else {
        xgmac_mac_disable(priv->base);
    }

    return 0;
}

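/* Clear the PMT wake-up state, re-enable the MAC and restore the
 * default DMA interrupt mask before reattaching the device.
 */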
static int xgmac_resume(struct device *dev)
{
    struct net_device *ndev = dev_get_drvdata(dev);
    struct xgmac_priv *priv = netdev_priv(ndev);
    void __iomem *ioaddr = priv->base;

    if (!netif_running(ndev))
        return 0;

    xgmac_pmt(ioaddr, 0);

    /* Enable the MAC and DMA */
    xgmac_mac_enable(ioaddr);
    writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
    writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

    netif_device_attach(ndev);
    napi_enable(&priv->napi);

    return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);

static const struct of_device_id xgmac_of_match[] = {
    { .compatible = "calxeda,hb-xgmac", },
    {},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);

static struct platform_driver xgmac_driver = {
    .driver = {
        .name = "calxedaxgmac",
        .of_match_table = xgmac_of_match,
        .pm = &xgmac_pm_ops,
    },
    .probe = xgmac_probe,
    .remove = xgmac_remove,
};

module_platform_driver(xgmac_driver);

MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
MODULE_LICENSE("GPL v2");