0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0012
0013 #include <linux/clk.h>
0014 #include <linux/crc32.h>
0015 #include <linux/etherdevice.h>
0016 #include <linux/module.h>
0017 #include <linux/of.h>
0018 #include <linux/of_mdio.h>
0019 #include <linux/of_net.h>
0020 #include <linux/phy.h>
0021 #include <linux/platform_device.h>
0022 #include <linux/spinlock.h>
0023 #include <linux/soc/nxp/lpc32xx-misc.h>
0024
/* Driver name and version reported through ethtool */
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"

/* Largest accepted frame size and RX/TX descriptor ring sizes */
#define ENET_MAXF_SIZE 1536
#define ENET_RX_DESC 48
#define ENET_TX_DESC 16

/* NAPI poll weight */
#define NAPI_WEIGHT 16
0033
0034
0035
0036
/*
 * Ethernet MAC controller register offsets, relative to the controller
 * base address (x).  MAC1..MIND are the MAC/MII-management configuration
 * registers, SA0..SA2 hold the station address, the 0x100+ block holds
 * the DMA/control registers, and 0xFE0+ the interrupt registers.
 */
#define LPC_ENET_MAC1(x) (x + 0x000)
#define LPC_ENET_MAC2(x) (x + 0x004)
#define LPC_ENET_IPGT(x) (x + 0x008)
#define LPC_ENET_IPGR(x) (x + 0x00C)
#define LPC_ENET_CLRT(x) (x + 0x010)
#define LPC_ENET_MAXF(x) (x + 0x014)
#define LPC_ENET_SUPP(x) (x + 0x018)
#define LPC_ENET_TEST(x) (x + 0x01C)
#define LPC_ENET_MCFG(x) (x + 0x020)
#define LPC_ENET_MCMD(x) (x + 0x024)
#define LPC_ENET_MADR(x) (x + 0x028)
#define LPC_ENET_MWTD(x) (x + 0x02C)
#define LPC_ENET_MRDD(x) (x + 0x030)
#define LPC_ENET_MIND(x) (x + 0x034)
#define LPC_ENET_SA0(x) (x + 0x040)
#define LPC_ENET_SA1(x) (x + 0x044)
#define LPC_ENET_SA2(x) (x + 0x048)
#define LPC_ENET_COMMAND(x) (x + 0x100)
#define LPC_ENET_STATUS(x) (x + 0x104)
#define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
#define LPC_ENET_RXSTATUS(x) (x + 0x10C)
#define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
#define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
#define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
#define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
#define LPC_ENET_TXSTATUS(x) (x + 0x120)
#define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
#define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
#define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
#define LPC_ENET_TSV0(x) (x + 0x158)
#define LPC_ENET_TSV1(x) (x + 0x15C)
#define LPC_ENET_RSV(x) (x + 0x160)
#define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
#define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
#define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
#define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
#define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
#define LPC_ENET_HASHFILTERL(x) (x + 0x210)
#define LPC_ENET_HASHFILTERH(x) (x + 0x214)
#define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
#define LPC_ENET_INTENABLE(x) (x + 0xFE4)
#define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
#define LPC_ENET_INTSET(x) (x + 0xFEC)
#define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
0081
0082
0083
0084
/*
 * mac1 register definitions
 */
#define LPC_MAC1_RECV_ENABLE (1 << 0)
#define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
#define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
#define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
#define LPC_MAC1_LOOPBACK (1 << 4)
#define LPC_MAC1_RESET_TX (1 << 8)
#define LPC_MAC1_RESET_MCS_TX (1 << 9)
#define LPC_MAC1_RESET_RX (1 << 10)
#define LPC_MAC1_RESET_MCS_RX (1 << 11)
#define LPC_MAC1_SIMULATION_RESET (1 << 14)
#define LPC_MAC1_SOFT_RESET (1 << 15)

/*
 * mac2 register definitions
 */
#define LPC_MAC2_FULL_DUPLEX (1 << 0)
#define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
#define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
#define LPC_MAC2_DELAYED_CRC (1 << 3)
#define LPC_MAC2_CRC_ENABLE (1 << 4)
#define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
#define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
#define LPC_MAC2_NO_BACKOFF (1 << 12)
#define LPC_MAC2_BACK_PRESSURE (1 << 13)
#define LPC_MAC2_EXCESS_DEFER (1 << 14)

/*
 * ipgt register definitions (back-to-back inter-packet gap)
 */
#define LPC_IPGT_LOAD(n) ((n) & 0x7F)

/*
 * ipgr register definitions (non back-to-back inter-packet gap)
 */
#define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
#define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)

/*
 * clrt register definitions (collision window / retry limits)
 */
#define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
#define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)

/*
 * maxf register definitions (maximum frame length)
 */
#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
0135
0136
0137
0138
/*
 * supp register definitions (PHY support)
 */
#define LPC_SUPP_SPEED (1 << 8)
#define LPC_SUPP_RESET_RMII (1 << 11)

/*
 * test register definitions
 */
#define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
#define LPC_TEST_PAUSE (1 << 1)
#define LPC_TEST_BACKPRESSURE (1 << 2)

/*
 * mcfg register definitions (MII management configuration)
 */
#define LPC_MCFG_SCAN_INCREMENT (1 << 0)
#define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
#define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
#define LPC_MCFG_CLOCK_HOST_DIV_4 0
#define LPC_MCFG_CLOCK_HOST_DIV_6 2
#define LPC_MCFG_CLOCK_HOST_DIV_8 3
#define LPC_MCFG_CLOCK_HOST_DIV_10 4
#define LPC_MCFG_CLOCK_HOST_DIV_14 5
#define LPC_MCFG_CLOCK_HOST_DIV_20 6
#define LPC_MCFG_CLOCK_HOST_DIV_28 7
#define LPC_MCFG_RESET_MII_MGMT (1 << 15)

/*
 * mcmd register definitions (MII management command)
 */
#define LPC_MCMD_READ (1 << 0)
#define LPC_MCMD_SCAN (1 << 1)

/*
 * madr register definitions (MII management address)
 */
#define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
#define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)

/*
 * mwtd register definitions (MII management write data)
 */
#define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)

/*
 * mrdd register definitions (MII management read data)
 */
#define LPC_MRDD_READ_MASK 0xFFFF

/*
 * mind register definitions (MII management indicators)
 */
#define LPC_MIND_BUSY (1 << 0)
#define LPC_MIND_SCANNING (1 << 1)
#define LPC_MIND_NOT_VALID (1 << 2)
#define LPC_MIND_MII_LINK_FAIL (1 << 3)
0193
0194
0195
0196
/*
 * command register definitions
 */
#define LPC_COMMAND_RXENABLE (1 << 0)
#define LPC_COMMAND_TXENABLE (1 << 1)
#define LPC_COMMAND_REG_RESET (1 << 3)
#define LPC_COMMAND_TXRESET (1 << 4)
#define LPC_COMMAND_RXRESET (1 << 5)
#define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
#define LPC_COMMAND_PASSRXFILTER (1 << 7)
#define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
#define LPC_COMMAND_RMII (1 << 9)
#define LPC_COMMAND_FULLDUPLEX (1 << 10)

/*
 * status register definitions
 */
#define LPC_STATUS_RXACTIVE (1 << 0)
#define LPC_STATUS_TXACTIVE (1 << 1)

/*
 * tsv0 register definitions (transmit status vector 0)
 */
#define LPC_TSV0_CRC_ERROR (1 << 0)
#define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
#define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
#define LPC_TSV0_DONE (1 << 3)
#define LPC_TSV0_MULTICAST (1 << 4)
#define LPC_TSV0_BROADCAST (1 << 5)
#define LPC_TSV0_PACKET_DEFER (1 << 6)
#define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
#define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
#define LPC_TSV0_LATE_COLLISION (1 << 9)
#define LPC_TSV0_GIANT (1 << 10)
#define LPC_TSV0_UNDERRUN (1 << 11)
#define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
#define LPC_TSV0_CONTROL_FRAME (1 << 28)
#define LPC_TSV0_PAUSE (1 << 29)
#define LPC_TSV0_BACKPRESSURE (1 << 30)
#define LPC_TSV0_VLAN (1 << 31)

/*
 * tsv1 register definitions (transmit status vector 1)
 */
#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
#define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)

/*
 * rsv register definitions (receive status vector)
 */
#define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
#define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
#define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
#define LPC_RSV_CRC_ERROR (1 << 20)
#define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
#define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
#define LPC_RSV_RECEIVE_OK (1 << 23)
#define LPC_RSV_MULTICAST (1 << 24)
#define LPC_RSV_BROADCAST (1 << 25)
#define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
#define LPC_RSV_CONTROL_FRAME (1 << 27)
#define LPC_RSV_PAUSE (1 << 28)
#define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
#define LPC_RSV_VLAN (1 << 30)

/*
 * flow control counter register definitions
 */
#define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
#define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)

/*
 * flow control status register definitions
 */
#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
0271
0272
0273
0274
0275
/*
 * rx filter control register definitions
 */
#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
#define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
#define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
#define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)

/*
 * rx filter WoL enable bits (in the rx filter control register)
 */
#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)

/*
 * rx filter WoL status/clear register definitions
 */
#define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
#define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)

/*
 * interrupt status/enable/clear/set register definitions
 */
#define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
#define LPC_MACINT_RXERRORONINT (1 << 1)
#define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
#define LPC_MACINT_RXDONEINTEN (1 << 3)
#define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
#define LPC_MACINT_TXERRORINTEN (1 << 5)
#define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
#define LPC_MACINT_TXDONEINTEN (1 << 7)
#define LPC_MACINT_SOFTINTEN (1 << 12)
#define LPC_MACINT_WAKEUPINTEN (1 << 13)

/*
 * power down register definitions
 */
#define LPC_POWERDOWN_MACAHB (1 << 31)
0314
0315 static phy_interface_t lpc_phy_interface_mode(struct device *dev)
0316 {
0317 if (dev && dev->of_node) {
0318 const char *mode = of_get_property(dev->of_node,
0319 "phy-mode", NULL);
0320 if (mode && !strcmp(mode, "mii"))
0321 return PHY_INTERFACE_MODE_MII;
0322 }
0323 return PHY_INTERFACE_MODE_RMII;
0324 }
0325
0326 static bool use_iram_for_net(struct device *dev)
0327 {
0328 if (dev && dev->of_node)
0329 return of_property_read_bool(dev->of_node, "use-iram");
0330 return false;
0331 }
0332
0333
/*
 * Receive status information word bit definitions
 */
#define RXSTATUS_SIZE 0x000007FF
#define RXSTATUS_CONTROL (1 << 18)
#define RXSTATUS_VLAN (1 << 19)
#define RXSTATUS_FILTER (1 << 20)
#define RXSTATUS_MULTICAST (1 << 21)
#define RXSTATUS_BROADCAST (1 << 22)
#define RXSTATUS_CRC (1 << 23)
#define RXSTATUS_SYMBOL (1 << 24)
#define RXSTATUS_LENGTH (1 << 25)
#define RXSTATUS_RANGE (1 << 26)
#define RXSTATUS_ALIGN (1 << 27)
#define RXSTATUS_OVERRUN (1 << 28)
#define RXSTATUS_NODESC (1 << 29)
#define RXSTATUS_LAST (1 << 30)
#define RXSTATUS_ERROR (1 << 31)

/* Combined mask of all RX error condition bits */
#define RXSTATUS_STATUS_ERROR \
	(RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
	 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)

/*
 * Receive descriptor control word bit definitions
 */
#define RXDESC_CONTROL_SIZE 0x000007FF
#define RXDESC_CONTROL_INT (1 << 31)

/*
 * Transmit status information word bit definitions
 */
#define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
#define TXSTATUS_DEFER (1 << 25)
#define TXSTATUS_EXCESSDEFER (1 << 26)
#define TXSTATUS_EXCESSCOLL (1 << 27)
#define TXSTATUS_LATECOLL (1 << 28)
#define TXSTATUS_UNDERRUN (1 << 29)
#define TXSTATUS_NODESC (1 << 30)
#define TXSTATUS_ERROR (1 << 31)

/*
 * Transmit descriptor control word bit definitions
 */
#define TXDESC_CONTROL_SIZE 0x000007FF
#define TXDESC_CONTROL_OVERRIDE (1 << 26)
#define TXDESC_CONTROL_HUGE (1 << 27)
#define TXDESC_CONTROL_PAD (1 << 28)
#define TXDESC_CONTROL_CRC (1 << 29)
#define TXDESC_CONTROL_LAST (1 << 30)
#define TXDESC_CONTROL_INT (1 << 31)
0376
0377
0378
0379
/* Hardware TX/RX descriptor: DMA address of the buffer plus control word */
struct txrx_desc_t {
	__le32 packet;
	__le32 control;
};

/* Hardware RX status entry: status/length word plus hash CRC word */
struct rx_status_t {
	__le32 statusinfo;
	__le32 statushashcrc;
};
0388
0389
0390
0391
/*
 * Per-device driver state.  The descriptor rings, status arrays and
 * packet buffers all live inside one DMA-coherent allocation
 * (dma_buff_base_v/dma_buff_base_p), carved up by
 * __lpc_txrx_desc_setup().
 */
struct netdata_local {
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device_node *phy_node;
	spinlock_t lock;		/* protects register access and ring state */
	void __iomem *net_base;		/* mapped controller registers */
	u32 msg_enable;			/* ethtool message-level bitmask */
	unsigned int skblen[ENET_TX_DESC];	/* length queued per TX slot */
	unsigned int last_tx_idx;	/* next TX slot to reclaim */
	unsigned int num_used_tx_buffs;	/* TX slots currently in flight */
	struct mii_bus *mii_bus;
	struct clk *clk;
	dma_addr_t dma_buff_base_p;	/* physical base of DMA region */
	void *dma_buff_base_v;		/* virtual base of DMA region */
	size_t dma_buff_size;
	struct txrx_desc_t *tx_desc_v;
	u32 *tx_stat_v;
	void *tx_buff_v;
	struct txrx_desc_t *rx_desc_v;
	struct rx_status_t *rx_stat_v;
	void *rx_buff_v;
	int link;			/* last observed PHY link state */
	int speed;			/* last observed speed (0 when down) */
	int duplex;			/* last observed duplex (-1 when down) */
	struct napi_struct napi;
};
0418
0419
0420
0421
0422 static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
0423 {
0424 u32 tmp;
0425
0426
0427 tmp = mac[0] | ((u32)mac[1] << 8);
0428 writel(tmp, LPC_ENET_SA2(pldat->net_base));
0429 tmp = mac[2] | ((u32)mac[3] << 8);
0430 writel(tmp, LPC_ENET_SA1(pldat->net_base));
0431 tmp = mac[4] | ((u32)mac[5] << 8);
0432 writel(tmp, LPC_ENET_SA0(pldat->net_base));
0433
0434 netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
0435 }
0436
0437 static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
0438 {
0439 u32 tmp;
0440
0441
0442 tmp = readl(LPC_ENET_SA2(pldat->net_base));
0443 mac[0] = tmp & 0xFF;
0444 mac[1] = tmp >> 8;
0445 tmp = readl(LPC_ENET_SA1(pldat->net_base));
0446 mac[2] = tmp & 0xFF;
0447 mac[3] = tmp >> 8;
0448 tmp = readl(LPC_ENET_SA0(pldat->net_base));
0449 mac[4] = tmp & 0xFF;
0450 mac[5] = tmp >> 8;
0451 }
0452
0453 static void __lpc_params_setup(struct netdata_local *pldat)
0454 {
0455 u32 tmp;
0456
0457 if (pldat->duplex == DUPLEX_FULL) {
0458 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
0459 tmp |= LPC_MAC2_FULL_DUPLEX;
0460 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
0461 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
0462 tmp |= LPC_COMMAND_FULLDUPLEX;
0463 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
0464 writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
0465 } else {
0466 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
0467 tmp &= ~LPC_MAC2_FULL_DUPLEX;
0468 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
0469 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
0470 tmp &= ~LPC_COMMAND_FULLDUPLEX;
0471 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
0472 writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
0473 }
0474
0475 if (pldat->speed == SPEED_100)
0476 writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
0477 else
0478 writel(0, LPC_ENET_SUPP(pldat->net_base));
0479 }
0480
/* Full hardware reset: soft-reset every MAC sub-block via MAC1, then
 * reset the datapath registers and TX/RX engines via COMMAND.
 */
static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}
0490
/* Reset the MII management hardware and pick the slowest MDC clock
 * divider (host clock / 28) so the bus stays within spec at any
 * supported CPU clock rate.  Always returns 0.
 */
static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Setup MII clock to slowest rate with a /28 divider */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
0502
0503 static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
0504 {
0505 phys_addr_t phaddr;
0506
0507 phaddr = addr - pldat->dma_buff_base_v;
0508 phaddr += pldat->dma_buff_base_p;
0509
0510 return phaddr;
0511 }
0512
/* Unmask the RX-done and TX-done MAC interrupts (the only ones used) */
static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}
0518
/* Mask all MAC interrupts */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
0523
0524
/*
 * __lpc_txrx_desc_setup - carve the descriptor rings, status arrays and
 * packet buffers out of the single DMA region and point the hardware at
 * them.
 *
 * Layout inside dma_buff_base_v (sections 16-byte aligned):
 *   TX descriptors, TX status words, TX buffers,
 *   RX descriptors, RX status entries, RX buffers.
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to their static buffers */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to their static buffers */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		/* Interrupt per frame; size field is (buffer size - 1) */
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
0597
0598 static void __lpc_eth_init(struct netdata_local *pldat)
0599 {
0600 u32 tmp;
0601
0602
0603 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
0604 tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
0605 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
0606 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
0607 tmp &= ~LPC_MAC1_RECV_ENABLE;
0608 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
0609
0610
0611 writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
0612 writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
0613 LPC_ENET_MAC2(pldat->net_base));
0614 writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
0615
0616
0617 writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
0618 LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
0619 LPC_ENET_CLRT(pldat->net_base));
0620 writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
0621
0622 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
0623 writel(LPC_COMMAND_PASSRUNTFRAME,
0624 LPC_ENET_COMMAND(pldat->net_base));
0625 else {
0626 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
0627 LPC_ENET_COMMAND(pldat->net_base));
0628 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
0629 }
0630
0631 __lpc_params_setup(pldat);
0632
0633
0634 __lpc_txrx_desc_setup(pldat);
0635
0636
0637 writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
0638 LPC_ENET_RXFILTER_CTRL(pldat->net_base));
0639
0640
0641 pldat->num_used_tx_buffs = 0;
0642 pldat->last_tx_idx =
0643 readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
0644
0645
0646 writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
0647 smp_wmb();
0648 lpc_eth_enable_int(pldat->net_base);
0649
0650
0651 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
0652 tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
0653 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
0654 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
0655 tmp |= LPC_MAC1_RECV_ENABLE;
0656 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
0657 }
0658
/* Reset the controller, then clear the MAC configuration registers so
 * the interface is fully quiesced.
 */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
0666
0667
0668
0669
/*
 * MDIO bus read callback: read @phyreg from PHY @phy_id over the MII
 * management interface.  Returns the 16-bit register value, or -EIO if
 * the hardware stays busy for more than 100ms.
 */
static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	int lps;

	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));

	/* Wait for unbusy status */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	lps = readl(LPC_ENET_MRDD(pldat->net_base));
	writel(0, LPC_ENET_MCMD(pldat->net_base));

	return lps;
}
0691
/*
 * MDIO bus write callback: write @phydata to @phyreg of PHY @phy_id
 * over the MII management interface.  Returns 0 on success, or -EIO if
 * the hardware stays busy for more than 100ms.
 */
static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
			  u16 phydata)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(phydata, LPC_ENET_MWTD(pldat->net_base));

	/* Wait for completion */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
0710
0711 static int lpc_mdio_reset(struct mii_bus *bus)
0712 {
0713 return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
0714 }
0715
0716 static void lpc_handle_link_change(struct net_device *ndev)
0717 {
0718 struct netdata_local *pldat = netdev_priv(ndev);
0719 struct phy_device *phydev = ndev->phydev;
0720 unsigned long flags;
0721
0722 bool status_change = false;
0723
0724 spin_lock_irqsave(&pldat->lock, flags);
0725
0726 if (phydev->link) {
0727 if ((pldat->speed != phydev->speed) ||
0728 (pldat->duplex != phydev->duplex)) {
0729 pldat->speed = phydev->speed;
0730 pldat->duplex = phydev->duplex;
0731 status_change = true;
0732 }
0733 }
0734
0735 if (phydev->link != pldat->link) {
0736 if (!phydev->link) {
0737 pldat->speed = 0;
0738 pldat->duplex = -1;
0739 }
0740 pldat->link = phydev->link;
0741
0742 status_change = true;
0743 }
0744
0745 spin_unlock_irqrestore(&pldat->lock, flags);
0746
0747 if (status_change)
0748 __lpc_params_setup(pldat);
0749 }
0750
/*
 * lpc_mii_probe - locate the PHY (from the DT "phy" node when present,
 * otherwise the first device found on the MDIO bus) and attach it to
 * @ndev with lpc_handle_link_change() as the adjust-link callback.
 * Returns 0 on success, -ENODEV when no PHY is found, or the
 * phy_connect() error.
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev;

	/* Attach to the PHY */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");

	if (pldat->phy_node)
		phydev = of_phy_find_device(pldat->phy_node);
	else
		phydev = phy_find_first(pldat->mii_bus);
	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(ndev, phydev_name(phydev),
			     &lpc_handle_link_change,
			     lpc_phy_interface_mode(&pldat->pdev->dev));
	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* MAC only supports 10/100 */
	phy_set_max_speed(phydev, SPEED_100);

	/* Force link parameters to be re-evaluated on first link change */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;

	phy_attached_info(phydev);

	return 0;
}
0789
0790 static int lpc_mii_init(struct netdata_local *pldat)
0791 {
0792 struct device_node *node;
0793 int err = -ENXIO;
0794
0795 pldat->mii_bus = mdiobus_alloc();
0796 if (!pldat->mii_bus) {
0797 err = -ENOMEM;
0798 goto err_out;
0799 }
0800
0801
0802 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
0803 writel(LPC_COMMAND_PASSRUNTFRAME,
0804 LPC_ENET_COMMAND(pldat->net_base));
0805 else {
0806 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
0807 LPC_ENET_COMMAND(pldat->net_base));
0808 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
0809 }
0810
0811 pldat->mii_bus->name = "lpc_mii_bus";
0812 pldat->mii_bus->read = &lpc_mdio_read;
0813 pldat->mii_bus->write = &lpc_mdio_write;
0814 pldat->mii_bus->reset = &lpc_mdio_reset;
0815 snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
0816 pldat->pdev->name, pldat->pdev->id);
0817 pldat->mii_bus->priv = pldat;
0818 pldat->mii_bus->parent = &pldat->pdev->dev;
0819
0820 node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio");
0821 err = of_mdiobus_register(pldat->mii_bus, node);
0822 of_node_put(node);
0823 if (err)
0824 goto err_out_unregister_bus;
0825
0826 err = lpc_mii_probe(pldat->ndev);
0827 if (err)
0828 goto err_out_unregister_bus;
0829
0830 return 0;
0831
0832 err_out_unregister_bus:
0833 mdiobus_unregister(pldat->mii_bus);
0834 mdiobus_free(pldat->mii_bus);
0835 err_out:
0836 return err;
0837 }
0838
/*
 * __lpc_handle_xmit - reclaim completed TX descriptors.
 *
 * Walks the ring from last_tx_idx up to the hardware consume index,
 * updating statistics for each completed frame, and wakes the TX queue
 * once at least half the ring is free.  Called from NAPI poll with the
 * TX queue lock held.
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit hit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
0894
/*
 * __lpc_handle_recv - receive up to @budget frames from the RX ring.
 *
 * For each descriptor between the consume and produce indexes, either
 * account an error or copy the frame out of the static DMA buffer into
 * a freshly allocated skb and pass it up the stack.  Returns the number
 * of descriptors processed.
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
	       readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status; size field is (len - 1) */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? A range error alone is not treated as an
		 * error (out-of-spec but otherwise intact frame).
		 */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;

			if (si & RXSTATUS_OVERRUN) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				/* Copy packet from the static DMA buffer */
				skb_put_data(skb,
					     pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
					     len);

				/* Pass frame up the network stack */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index and return the slot to hardware */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
0964
/*
 * NAPI poll: reclaim finished TX buffers (under the TX queue lock),
 * then receive up to @budget frames.  When all pending RX work fits in
 * the budget, complete NAPI and re-enable MAC interrupts.
 */
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete_done(napi, rx_done);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
0985
/*
 * MAC interrupt handler: acknowledge all pending interrupt sources,
 * mask further interrupts and hand the work off to NAPI (which
 * re-enables interrupts once the ring work is done).
 */
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
1006
/*
 * ndo_stop: quiesce NAPI and the TX queue, reset the controller under
 * the lock, stop the PHY and gate the peripheral clock.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	clk_disable_unprepare(pldat->clk);

	return 0;
}
1031
/*
 * ndo_start_xmit: copy the frame into the static TX DMA buffer for the
 * next free descriptor and hand it to the hardware.  The skb is freed
 * immediately after the copy since the data is no longer referenced.
 * Stops the queue once the ring is nearly full.
 */
static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This shouldn't happen: the queue is stopped before the
		 * ring fills up
		 */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer; size field is (len - 1) */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skblen[txidx] = len;
	pldat->num_used_tx_buffs++;

	/* Start transmit by bumping the produce index */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1086
1087 static int lpc_set_mac_address(struct net_device *ndev, void *p)
1088 {
1089 struct sockaddr *addr = p;
1090 struct netdata_local *pldat = netdev_priv(ndev);
1091 unsigned long flags;
1092
1093 if (!is_valid_ether_addr(addr->sa_data))
1094 return -EADDRNOTAVAIL;
1095 eth_hw_addr_set(ndev, addr->sa_data);
1096
1097 spin_lock_irqsave(&pldat->lock, flags);
1098
1099
1100 __lpc_set_mac(pldat, ndev->dev_addr);
1101
1102 spin_unlock_irqrestore(&pldat->lock, flags);
1103
1104 return 0;
1105 }
1106
/*
 * ndo_set_rx_mode: reprogram the RX filter for the current interface
 * flags (promiscuous / all-multicast) and rebuild the 64-bit multicast
 * hash filter from the device's multicast list.
 */
static void lpc_eth_set_multicast_list(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct netdev_hw_addr_list *mcptr = &ndev->mc;
	struct netdev_hw_addr *ha;
	u32 tmp32, hash_val, hashlo, hashhi;
	unsigned long flags;

	spin_lock_irqsave(&pldat->lock, flags);

	/* Set station address */
	__lpc_set_mac(pldat, ndev->dev_addr);

	tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;

	if (ndev->flags & IFF_PROMISC)
		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
			LPC_RXFLTRW_ACCEPTUMULTICAST;
	if (ndev->flags & IFF_ALLMULTI)
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;

	if (netdev_hw_addr_list_count(mcptr))
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;

	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));

	/* Set initial hash table */
	hashlo = 0x0;
	hashhi = 0x0;

	/* 64 bits : multicast address in hash table */
	netdev_hw_addr_list_for_each(ha, mcptr) {
		/* Bit index = CRC32 bits 28..23 of the MAC address */
		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;

		if (hash_val >= 32)
			hashhi |= 1 << (hash_val - 32);
		else
			hashlo |= 1 << hash_val;
	}

	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));

	spin_unlock_irqrestore(&pldat->lock, flags);
}
1153
/* ndo_open handler: enable the clock, resume the PHY, reset and
 * reinitialize the MAC, then start the queue and NAPI polling.
 */
static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	int ret;

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		return ret;

	/* Resume the PHY before touching the MAC; a suspended PHY can
	 * leave the Ethernet core blocked. (NOTE(review): inferred from
	 * ordering here — confirm against the LPC32xx manual.)
	 */
	phy_resume(ndev->phydev);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* Start the PHY state machine, then allow TX and RX polling */
	phy_start(ndev->phydev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}
1180
1181
1182
1183
1184 static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
1185 struct ethtool_drvinfo *info)
1186 {
1187 strlcpy(info->driver, MODNAME, sizeof(info->driver));
1188 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1189 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
1190 sizeof(info->bus_info));
1191 }
1192
1193 static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1194 {
1195 struct netdata_local *pldat = netdev_priv(ndev);
1196
1197 return pldat->msg_enable;
1198 }
1199
1200 static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
1201 {
1202 struct netdata_local *pldat = netdev_priv(ndev);
1203
1204 pldat->msg_enable = level;
1205 }
1206
/* ethtool operations; link state and link settings are delegated to
 * the generic PHY helpers.
 */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo = lpc_eth_ethtool_getdrvinfo,
	.get_msglevel = lpc_eth_ethtool_getmsglevel,
	.set_msglevel = lpc_eth_ethtool_setmsglevel,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
1215
/* Net device operations; ioctls are forwarded to the attached PHY */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open = lpc_eth_open,
	.ndo_stop = lpc_eth_close,
	.ndo_start_xmit = lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode = lpc_eth_set_multicast_list,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_set_mac_address = lpc_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};
1225
/* Probe: map registers, set up DMA buffers (in on-chip IRAM when it
 * fits, otherwise coherent SDRAM), determine a MAC address and
 * register the net device and its MII bus. On failure, unwinds in
 * reverse order of acquisition via the goto labels at the end.
 */
static int lpc_eth_drv_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct netdata_local *pldat;
	struct net_device *ndev;
	dma_addr_t dma_handle;
	struct resource *res;
	u8 addr[ETH_ALEN];
	int irq, ret;

	/* Configure the SoC-level PHY interface mode (RMII/MII) */
	lpc32xx_set_phy_interface_mode(lpc_phy_interface_mode(dev));

	/* Get platform resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(dev, "error getting resources.\n");
		ret = -ENXIO;
		goto err_exit;
	}

	/* Allocate net driver data structure */
	ndev = alloc_etherdev(sizeof(struct netdata_local));
	if (!ndev) {
		dev_err(dev, "could not allocate device.\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	SET_NETDEV_DEV(ndev, dev);

	pldat = netdev_priv(ndev);
	pldat->pdev = pdev;
	pldat->ndev = ndev;

	spin_lock_init(&pldat->lock);

	/* Save resources */
	ndev->irq = irq;

	/* Get clock for the device */
	pldat->clk = clk_get(dev, NULL);
	if (IS_ERR(pldat->clk)) {
		dev_err(dev, "error getting clock.\n");
		ret = PTR_ERR(pldat->clk);
		goto err_out_free_dev;
	}

	/* Enable network clock */
	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		goto err_out_clk_put;

	/* Map IO space */
	pldat->net_base = ioremap(res->start, resource_size(res));
	if (!pldat->net_base) {
		dev_err(dev, "failed to map registers\n");
		ret = -ENOMEM;
		goto err_out_disable_clocks;
	}
	ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
			  ndev->name, ndev);
	if (ret) {
		dev_err(dev, "error requesting interrupt.\n");
		goto err_out_iounmap;
	}

	/* Setup driver functions */
	ndev->netdev_ops = &lpc_netdev_ops;
	ndev->ethtool_ops = &lpc_eth_ethtool_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(2500);

	/* Size of the region holding all frame buffers, TX/RX
	 * descriptors and RX status entries.
	 */
	pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
		sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));

	if (use_iram_for_net(dev)) {
		/* Prefer on-chip IRAM for DMA buffers when it is large
		 * enough; otherwise fall back to SDRAM below.
		 */
		if (pldat->dma_buff_size >
		    lpc32xx_return_iram(&pldat->dma_buff_base_v, &dma_handle)) {
			pldat->dma_buff_base_v = NULL;
			pldat->dma_buff_size = 0;
			netdev_err(ndev,
				"IRAM not big enough for net buffers, using SDRAM instead.\n");
		}
	}

	if (pldat->dma_buff_base_v == NULL) {
		ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			goto err_out_free_irq;

		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);

		/* Allocate a coherent chunk for the DMA ethernet buffers
		 * and descriptors.
		 */
		pldat->dma_buff_base_v =
			dma_alloc_coherent(dev,
					   pldat->dma_buff_size, &dma_handle,
					   GFP_KERNEL);
		if (pldat->dma_buff_base_v == NULL) {
			ret = -ENOMEM;
			goto err_out_free_irq;
		}
	}
	pldat->dma_buff_base_p = dma_handle;

	netdev_dbg(ndev, "IO address space :%pR\n", res);
	netdev_dbg(ndev, "IO address size :%zd\n",
		(size_t)resource_size(res));
	netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
		pldat->net_base);
	netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
	netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size);
	netdev_dbg(ndev, "DMA buffer P address :%pad\n",
		&pldat->dma_buff_base_p);
	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
		pldat->dma_buff_base_v);

	pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Get the MAC address currently programmed in hardware; fall back
	 * to the device tree, then to a random address.
	 */
	__lpc_get_mac(pldat, addr);
	eth_hw_addr_set(ndev, addr);

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		of_get_ethdev_address(np, ndev);
	}
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Shut the block down until the interface is opened */
	__lpc_eth_shutdown(pldat);

	/* Set default message level */
	pldat->msg_enable = NETIF_MSG_LINK;

	/* Force an MII interface reset and clock setup */
	__lpc_mii_mngt_reset(pldat);

	/* Default link parameters; the PHY driver will update these once
	 * a link is negotiated.
	 */
	pldat->link = 0;
	pldat->speed = 100;
	pldat->duplex = DUPLEX_FULL;
	__lpc_params_setup(pldat);

	netif_napi_add_weight(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "Cannot register net device, aborting.\n");
		goto err_out_dma_unmap;
	}
	platform_set_drvdata(pdev, ndev);

	ret = lpc_mii_init(pldat);
	if (ret)
		goto err_out_unregister_netdev;

	netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
		(unsigned long)res->start, ndev->irq);

	device_init_wakeup(dev, 1);
	device_set_wakeup_enable(dev, 0);

	return 0;

	/* Error unwind: release only what was acquired, newest first.
	 * DMA memory is freed only if it did not come from IRAM.
	 */
err_out_unregister_netdev:
	unregister_netdev(ndev);
err_out_dma_unmap:
	if (!use_iram_for_net(dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
		dma_free_coherent(dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
err_out_free_irq:
	free_irq(ndev->irq, ndev);
err_out_iounmap:
	iounmap(pldat->net_base);
err_out_disable_clocks:
	clk_disable_unprepare(pldat->clk);
err_out_clk_put:
	clk_put(pldat->clk);
err_out_free_dev:
	free_netdev(ndev);
err_exit:
	pr_err("%s: not found (%d).\n", MODNAME, ret);
	return ret;
}
1419
/* Remove: undo everything probe set up, in reverse order */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);

	/* DMA memory was only allocated with dma_alloc_coherent() when it
	 * did not fit in (or did not come from) IRAM — mirror that here.
	 */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable_unprepare(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1442
1443 #ifdef CONFIG_PM
1444 static int lpc_eth_drv_suspend(struct platform_device *pdev,
1445 pm_message_t state)
1446 {
1447 struct net_device *ndev = platform_get_drvdata(pdev);
1448 struct netdata_local *pldat = netdev_priv(ndev);
1449
1450 if (device_may_wakeup(&pdev->dev))
1451 enable_irq_wake(ndev->irq);
1452
1453 if (ndev) {
1454 if (netif_running(ndev)) {
1455 netif_device_detach(ndev);
1456 __lpc_eth_shutdown(pldat);
1457 clk_disable_unprepare(pldat->clk);
1458
1459
1460
1461
1462
1463 __lpc_eth_reset(pldat);
1464 }
1465 }
1466
1467 return 0;
1468 }
1469
1470 static int lpc_eth_drv_resume(struct platform_device *pdev)
1471 {
1472 struct net_device *ndev = platform_get_drvdata(pdev);
1473 struct netdata_local *pldat;
1474 int ret;
1475
1476 if (device_may_wakeup(&pdev->dev))
1477 disable_irq_wake(ndev->irq);
1478
1479 if (ndev) {
1480 if (netif_running(ndev)) {
1481 pldat = netdev_priv(ndev);
1482
1483
1484 ret = clk_enable(pldat->clk);
1485 if (ret)
1486 return ret;
1487
1488
1489 __lpc_eth_reset(pldat);
1490 __lpc_eth_init(pldat);
1491
1492 netif_device_attach(ndev);
1493 }
1494 }
1495
1496 return 0;
1497 }
1498 #endif
1499
/* Device tree match table; bound to nodes with compatible = "nxp,lpc-eth" */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
1505
/* Platform driver glue; the legacy suspend/resume hooks are only
 * compiled in when CONFIG_PM is enabled.
 */
static struct platform_driver lpc_eth_driver = {
	.probe = lpc_eth_drv_probe,
	.remove = lpc_eth_drv_remove,
#ifdef CONFIG_PM
	.suspend = lpc_eth_drv_suspend,
	.resume = lpc_eth_drv_resume,
#endif
	.driver = {
		.name = MODNAME,
		.of_match_table = lpc_eth_match,
	},
};
1518
/* Standard module init/exit boilerplate generated from the driver struct */
module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");