0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025 #include <linux/errno.h>
0026 #include <linux/etherdevice.h>
0027 #include <linux/if_arp.h>
0028 #include <linux/if_ether.h>
0029 #include <linux/init.h>
0030 #include <linux/interrupt.h>
0031 #include <linux/jiffies.h>
0032 #include <linux/kernel.h>
0033 #include <linux/kthread.h>
0034 #include <linux/module.h>
0035 #include <linux/moduleparam.h>
0036 #include <linux/netdevice.h>
0037 #include <linux/of.h>
0038 #include <linux/of_device.h>
0039 #include <linux/of_net.h>
0040 #include <linux/sched.h>
0041 #include <linux/skbuff.h>
0042 #include <linux/spi/spi.h>
0043 #include <linux/types.h>
0044
0045 #include "qca_7k.h"
0046 #include "qca_7k_common.h"
0047 #include "qca_debug.h"
0048 #include "qca_spi.h"
0049
0050 #define MAX_DMA_BURST_LEN 5000
0051
0052
0053 #define QCASPI_CLK_SPEED_MIN 1000000
0054 #define QCASPI_CLK_SPEED_MAX 16000000
0055 #define QCASPI_CLK_SPEED 8000000
0056 static int qcaspi_clkspeed;
0057 module_param(qcaspi_clkspeed, int, 0);
0058 MODULE_PARM_DESC(qcaspi_clkspeed, "SPI bus clock speed (Hz). Use 1000000-16000000.");
0059
0060 #define QCASPI_BURST_LEN_MIN 1
0061 #define QCASPI_BURST_LEN_MAX MAX_DMA_BURST_LEN
0062 static int qcaspi_burst_len = MAX_DMA_BURST_LEN;
0063 module_param(qcaspi_burst_len, int, 0);
0064 MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000.");
0065
0066 #define QCASPI_PLUGGABLE_MIN 0
0067 #define QCASPI_PLUGGABLE_MAX 1
0068 static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
0069 module_param(qcaspi_pluggable, int, 0);
0070 MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
0071
0072 #define QCASPI_WRITE_VERIFY_MIN 0
0073 #define QCASPI_WRITE_VERIFY_MAX 3
0074 static int wr_verify = QCASPI_WRITE_VERIFY_MIN;
0075 module_param(wr_verify, int, 0);
0076 MODULE_PARM_DESC(wr_verify, "SPI register write verify trails. Use 0-3.");
0077
0078 #define QCASPI_TX_TIMEOUT (1 * HZ)
0079 #define QCASPI_QCA7K_REBOOT_TIME_MS 1000
0080
/* Begin servicing chip interrupts: mask all interrupt sources on the
 * QCA7K, then fetch the pending cause bits into *intr_cause.
 */
static void
start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause)
{
	*intr_cause = 0;

	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
	qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause);
	netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause);
}
0090
/* Finish servicing chip interrupts: acknowledge the handled cause bits
 * and re-enable the interrupt sources this driver cares about.
 */
static void
end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause)
{
	u16 intr_enable = (SPI_INT_CPU_ON |
			   SPI_INT_PKT_AVLBL |
			   SPI_INT_RDBUF_ERR |
			   SPI_INT_WRBUF_ERR);

	/* Ack is written without verification (last arg 0); the enable
	 * write uses the configured wr_verify retries.
	 */
	qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause, 0);
	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable, wr_verify);
	netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause);
}
0103
0104 static u32
0105 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
0106 {
0107 __be16 cmd;
0108 struct spi_message msg;
0109 struct spi_transfer transfer[2];
0110 int ret;
0111
0112 memset(&transfer, 0, sizeof(transfer));
0113 spi_message_init(&msg);
0114
0115 cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
0116 transfer[0].tx_buf = &cmd;
0117 transfer[0].len = QCASPI_CMD_LEN;
0118 transfer[1].tx_buf = src;
0119 transfer[1].len = len;
0120
0121 spi_message_add_tail(&transfer[0], &msg);
0122 spi_message_add_tail(&transfer[1], &msg);
0123 ret = spi_sync(qca->spi_dev, &msg);
0124
0125 if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
0126 qcaspi_spi_error(qca);
0127 return 0;
0128 }
0129
0130 return len;
0131 }
0132
0133 static u32
0134 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
0135 {
0136 struct spi_message msg;
0137 struct spi_transfer transfer;
0138 int ret;
0139
0140 memset(&transfer, 0, sizeof(transfer));
0141 spi_message_init(&msg);
0142
0143 transfer.tx_buf = src;
0144 transfer.len = len;
0145
0146 spi_message_add_tail(&transfer, &msg);
0147 ret = spi_sync(qca->spi_dev, &msg);
0148
0149 if (ret || (msg.actual_length != len)) {
0150 qcaspi_spi_error(qca);
0151 return 0;
0152 }
0153
0154 return len;
0155 }
0156
0157 static u32
0158 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
0159 {
0160 struct spi_message msg;
0161 __be16 cmd;
0162 struct spi_transfer transfer[2];
0163 int ret;
0164
0165 memset(&transfer, 0, sizeof(transfer));
0166 spi_message_init(&msg);
0167
0168 cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
0169 transfer[0].tx_buf = &cmd;
0170 transfer[0].len = QCASPI_CMD_LEN;
0171 transfer[1].rx_buf = dst;
0172 transfer[1].len = len;
0173
0174 spi_message_add_tail(&transfer[0], &msg);
0175 spi_message_add_tail(&transfer[1], &msg);
0176 ret = spi_sync(qca->spi_dev, &msg);
0177
0178 if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
0179 qcaspi_spi_error(qca);
0180 return 0;
0181 }
0182
0183 return len;
0184 }
0185
0186 static u32
0187 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
0188 {
0189 struct spi_message msg;
0190 struct spi_transfer transfer;
0191 int ret;
0192
0193 memset(&transfer, 0, sizeof(transfer));
0194 spi_message_init(&msg);
0195
0196 transfer.rx_buf = dst;
0197 transfer.len = len;
0198
0199 spi_message_add_tail(&transfer, &msg);
0200 ret = spi_sync(qca->spi_dev, &msg);
0201
0202 if (ret || (msg.actual_length != len)) {
0203 qcaspi_spi_error(qca);
0204 return 0;
0205 }
0206
0207 return len;
0208 }
0209
0210 static int
0211 qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
0212 {
0213 __be16 tx_data;
0214 struct spi_message msg;
0215 struct spi_transfer transfer;
0216 int ret;
0217
0218 memset(&transfer, 0, sizeof(transfer));
0219
0220 spi_message_init(&msg);
0221
0222 tx_data = cpu_to_be16(cmd);
0223 transfer.len = sizeof(cmd);
0224 transfer.tx_buf = &tx_data;
0225 spi_message_add_tail(&transfer, &msg);
0226
0227 ret = spi_sync(qca->spi_dev, &msg);
0228
0229 if (!ret)
0230 ret = msg.status;
0231
0232 if (ret)
0233 qcaspi_spi_error(qca);
0234
0235 return ret;
0236 }
0237
0238 static int
0239 qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb)
0240 {
0241 u32 count;
0242 u32 written;
0243 u32 offset;
0244 u32 len;
0245
0246 len = skb->len;
0247
0248 qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len, wr_verify);
0249 if (qca->legacy_mode)
0250 qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
0251
0252 offset = 0;
0253 while (len) {
0254 count = len;
0255 if (count > qca->burst_len)
0256 count = qca->burst_len;
0257
0258 if (qca->legacy_mode) {
0259 written = qcaspi_write_legacy(qca,
0260 skb->data + offset,
0261 count);
0262 } else {
0263 written = qcaspi_write_burst(qca,
0264 skb->data + offset,
0265 count);
0266 }
0267
0268 if (written != count)
0269 return -1;
0270
0271 offset += count;
0272 len -= count;
0273 }
0274
0275 return 0;
0276 }
0277
/* Drain queued skbs from the TX ring head into the QCA7K write buffer,
 * as long as the chip reports enough free space.
 * Returns 0 on success (or nothing to do), -1 on a hardware/SPI error.
 */
static int
qcaspi_transmit(struct qcaspi *qca)
{
	struct net_device_stats *n_stats = &qca->net_dev->stats;
	u16 available = 0;
	u32 pkt_len;
	u16 new_head;
	u16 packets = 0;

	if (qca->txr.skb[qca->txr.head] == NULL)
		return 0;

	qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);

	if (available > QCASPI_HW_BUF_LEN) {
		/* The chip reported more free space than its write buffer
		 * can hold - the register value cannot be trusted.
		 */
		qca->stats.buf_avail_err++;
		return -1;
	}

	while (qca->txr.skb[qca->txr.head]) {
		/* Each frame also consumes a hardware packet header. */
		pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;

		if (available < pkt_len) {
			if (packets == 0)
				qca->stats.write_buf_miss++;
			break;
		}

		if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {
			qca->stats.write_err++;
			return -1;
		}

		packets++;
		n_stats->tx_packets++;
		n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;
		available -= pkt_len;

		/* Free the skb and advance the ring head under the netdev
		 * TX lock so this does not race with qcaspi_netdev_xmit()
		 * updating the tail; also restart the queue if it had been
		 * stopped for lack of ring space.
		 */
		netif_tx_lock_bh(qca->net_dev);
		dev_kfree_skb(qca->txr.skb[qca->txr.head]);
		qca->txr.skb[qca->txr.head] = NULL;
		qca->txr.size -= pkt_len;
		new_head = qca->txr.head + 1;
		if (new_head >= qca->txr.count)
			new_head = 0;
		qca->txr.head = new_head;
		if (netif_queue_stopped(qca->net_dev))
			netif_wake_queue(qca->net_dev);
		netif_tx_unlock_bh(qca->net_dev);
	}

	return 0;
}
0338
/* Read every byte the QCA7K read buffer announces and feed them one at
 * a time through the frame decoder; completed frames are delivered to
 * the network stack via netif_rx().
 * Returns 0 on success, -1 on error.
 */
static int
qcaspi_receive(struct qcaspi *qca)
{
	struct net_device *net_dev = qca->net_dev;
	struct net_device_stats *n_stats = &net_dev->stats;
	u16 available = 0;
	u32 bytes_read;
	u8 *cp;

	/* Allocate rx SKB if we don't have one available. */
	if (!qca->rx_skb) {
		qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
							net_dev->mtu +
							VLAN_ETH_HLEN);
		if (!qca->rx_skb) {
			netdev_dbg(net_dev, "out of RX resources\n");
			qca->stats.out_of_mem++;
			return -1;
		}
	}

	/* Read the packet size. */
	qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);

	netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
		   available);

	if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
		/* The chip reported more bytes than its read buffer plus
		 * one hardware packet header can hold - bogus value.
		 */
		qca->stats.buf_avail_err++;
		return -1;
	} else if (available == 0) {
		netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
		return -1;
	}

	qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available, wr_verify);

	if (qca->legacy_mode)
		qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);

	while (available) {
		u32 count = available;

		/* Clamp each SPI transfer to the configured burst length. */
		if (count > qca->burst_len)
			count = qca->burst_len;

		if (qca->legacy_mode) {
			bytes_read = qcaspi_read_legacy(qca, qca->rx_buffer,
							count);
		} else {
			bytes_read = qcaspi_read_burst(qca, qca->rx_buffer,
						       count);
		}

		netdev_dbg(net_dev, "available: %d, byte read: %d\n",
			   available, bytes_read);

		if (bytes_read) {
			available -= bytes_read;
		} else {
			qca->stats.read_err++;
			return -1;
		}

		cp = qca->rx_buffer;

		/* Feed the decoder byte by byte; the inner loop also stops
		 * when no replacement rx_skb could be allocated after a
		 * frame was handed off.
		 */
		while ((bytes_read--) && (qca->rx_skb)) {
			s32 retcode;

			retcode = qcafrm_fsm_decode(&qca->frm_handle,
						    qca->rx_skb->data,
						    skb_tailroom(qca->rx_skb),
						    *cp);
			cp++;
			switch (retcode) {
			case QCAFRM_GATHER:
			case QCAFRM_NOHEAD:
				break;
			case QCAFRM_NOTAIL:
				netdev_dbg(net_dev, "no RX tail\n");
				n_stats->rx_errors++;
				n_stats->rx_dropped++;
				break;
			case QCAFRM_INVLEN:
				netdev_dbg(net_dev, "invalid RX length\n");
				n_stats->rx_errors++;
				n_stats->rx_dropped++;
				break;
			default:
				/* Any other retcode is the length of a
				 * completed frame sitting in rx_skb.
				 */
				qca->rx_skb->dev = qca->net_dev;
				n_stats->rx_packets++;
				n_stats->rx_bytes += retcode;
				skb_put(qca->rx_skb, retcode);
				qca->rx_skb->protocol = eth_type_trans(
					qca->rx_skb, qca->rx_skb->dev);
				skb_checksum_none_assert(qca->rx_skb);
				netif_rx(qca->rx_skb);
				qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
									net_dev->mtu + VLAN_ETH_HLEN);
				if (!qca->rx_skb) {
					netdev_dbg(net_dev, "out of RX resources\n");
					n_stats->rx_errors++;
					qca->stats.out_of_mem++;
					break;
				}
			}
		}
	}

	return 0;
}
0453
0454
0455
0456
0457
0458 static int
0459 qcaspi_tx_ring_has_space(struct tx_ring *txr)
0460 {
0461 if (txr->skb[txr->tail])
0462 return 0;
0463
0464 return (txr->size + QCAFRM_MAX_LEN < QCASPI_HW_BUF_LEN) ? 1 : 0;
0465 }
0466
0467
0468
0469
0470
0471 static void
0472 qcaspi_flush_tx_ring(struct qcaspi *qca)
0473 {
0474 int i;
0475
0476
0477
0478
0479 netif_tx_lock_bh(qca->net_dev);
0480 for (i = 0; i < TX_RING_MAX_LEN; i++) {
0481 if (qca->txr.skb[i]) {
0482 dev_kfree_skb(qca->txr.skb[i]);
0483 qca->txr.skb[i] = NULL;
0484 qca->net_dev->stats.tx_dropped++;
0485 }
0486 }
0487 qca->txr.tail = 0;
0488 qca->txr.head = 0;
0489 qca->txr.size = 0;
0490 netif_tx_unlock_bh(qca->net_dev);
0491 }
0492
/* Sync state machine: verify the chip signature and, when the device is
 * out of sync, trigger a slave reset and wait for the CPU-on event.
 * @event: QCASPI_EVENT_CPUON when a CPU-on interrupt arrived,
 *         QCASPI_EVENT_UPDATE for a periodic check from the SPI thread.
 */
static void
qcaspi_qca7k_sync(struct qcaspi *qca, int event)
{
	u16 signature = 0;
	u16 spi_config;
	u16 wrbuf_space = 0;

	if (event == QCASPI_EVENT_CPUON) {
		/* Read signature twice, if not valid
		 * go back to unknown state.
		 */
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		if (signature != QCASPI_GOOD_SIGNATURE) {
			if (qca->sync == QCASPI_SYNC_READY)
				qca->stats.bad_signature++;

			qca->sync = QCASPI_SYNC_UNKNOWN;
			netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n");
			return;
		} else {
			/* ensure that the WRBUF is empty */
			qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA,
					     &wrbuf_space);
			if (wrbuf_space != QCASPI_HW_BUF_LEN) {
				netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n");
				qca->sync = QCASPI_SYNC_UNKNOWN;
			} else {
				netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n");
				qca->sync = QCASPI_SYNC_READY;
				return;
			}
		}
	}

	switch (qca->sync) {
	case QCASPI_SYNC_READY:
		/* Check signature twice; the first read may fail spuriously. */
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		if (signature != QCASPI_GOOD_SIGNATURE)
			qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);

		if (signature != QCASPI_GOOD_SIGNATURE) {
			qca->sync = QCASPI_SYNC_UNKNOWN;
			qca->stats.bad_signature++;
			netdev_dbg(qca->net_dev, "sync: bad signature, restart\n");
			/* don't reset right away */
			return;
		}
		break;
	case QCASPI_SYNC_UNKNOWN:
		/* Read signature, if not valid stay in unknown state. */
		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
		if (signature != QCASPI_GOOD_SIGNATURE) {
			netdev_dbg(qca->net_dev, "sync: could not read signature to reset device, retry.\n");
			return;
		}

		/* Signature OK: request a slave reset via SPI_CONFIG. */
		netdev_dbg(qca->net_dev, "sync: resetting device.\n");
		qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config);
		spi_config |= QCASPI_SLAVE_RESET_BIT;
		qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config, 0);

		qca->sync = QCASPI_SYNC_RESET;
		qca->stats.trig_reset++;
		qca->reset_count = 0;
		break;
	case QCASPI_SYNC_RESET:
		qca->reset_count++;
		netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
			   qca->reset_count);
		if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
			/* reset did not seem to take place, try again */
			qca->sync = QCASPI_SYNC_UNKNOWN;
			qca->stats.reset_timeout++;
			netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n");
		}
		break;
	}
}
0574
/* Main worker thread: sleeps until an interrupt is pending or TX work
 * is queued, keeps the sync state machine running, services chip
 * interrupts and transmits queued frames. Runs until kthread_stop().
 */
static int
qcaspi_spi_thread(void *data)
{
	struct qcaspi *qca = data;
	u16 intr_cause = 0;

	netdev_info(qca->net_dev, "SPI thread created\n");
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* Sleep only when there is no pending interrupt
		 * (intr_req == intr_svc), no queued TX frame and the
		 * device is in sync.
		 */
		if ((qca->intr_req == qca->intr_svc) &&
		    (qca->txr.skb[qca->txr.head] == NULL) &&
		    (qca->sync == QCASPI_SYNC_READY))
			schedule();

		set_current_state(TASK_RUNNING);

		netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
			   qca->intr_req - qca->intr_svc,
			   qca->txr.skb[qca->txr.head]);

		qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);

		if (qca->sync != QCASPI_SYNC_READY) {
			netdev_dbg(qca->net_dev, "sync: not ready %u, turn off carrier and flush\n",
				   (unsigned int)qca->sync);
			netif_stop_queue(qca->net_dev);
			netif_carrier_off(qca->net_dev);
			qcaspi_flush_tx_ring(qca);
			/* Give the QCA7K time to boot after a reset. */
			msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
		}

		if (qca->intr_svc != qca->intr_req) {
			qca->intr_svc = qca->intr_req;
			start_spi_intr_handling(qca, &intr_cause);

			if (intr_cause & SPI_INT_CPU_ON) {
				qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);

				/* Stay out of RX/TX until the sync state
				 * machine reports the device ready again.
				 */
				if (qca->sync != QCASPI_SYNC_READY)
					continue;

				qca->stats.device_reset++;
				netif_wake_queue(qca->net_dev);
				netif_carrier_on(qca->net_dev);
			}

			if (intr_cause & SPI_INT_RDBUF_ERR) {
				/* restart sync */
				netdev_dbg(qca->net_dev, "===> rdbuf error!\n");
				qca->stats.read_buf_err++;
				qca->sync = QCASPI_SYNC_UNKNOWN;
				continue;
			}

			if (intr_cause & SPI_INT_WRBUF_ERR) {
				/* restart sync */
				netdev_dbg(qca->net_dev, "===> wrbuf error!\n");
				qca->stats.write_buf_err++;
				qca->sync = QCASPI_SYNC_UNKNOWN;
				continue;
			}

			/* Can only handle other interrupts
			 * if sync has occurred.
			 */
			if (qca->sync == QCASPI_SYNC_READY) {
				if (intr_cause & SPI_INT_PKT_AVLBL)
					qcaspi_receive(qca);
			}

			end_spi_intr_handling(qca, intr_cause);
		}

		if (qca->sync == QCASPI_SYNC_READY)
			qcaspi_transmit(qca);
	}
	set_current_state(TASK_RUNNING);
	netdev_info(qca->net_dev, "SPI thread exit\n");

	return 0;
}
0657
/* Hard IRQ handler: count the request and kick the SPI worker thread;
 * all actual register access happens in thread context. The worker
 * compares intr_req against intr_svc to detect pending interrupts.
 */
static irqreturn_t
qcaspi_intr_handler(int irq, void *data)
{
	struct qcaspi *qca = data;

	qca->intr_req++;
	if (qca->spi_thread)
		wake_up_process(qca->spi_thread);

	return IRQ_HANDLED;
}
0669
0670 static int
0671 qcaspi_netdev_open(struct net_device *dev)
0672 {
0673 struct qcaspi *qca = netdev_priv(dev);
0674 int ret = 0;
0675
0676 if (!qca)
0677 return -EINVAL;
0678
0679 qca->intr_req = 1;
0680 qca->intr_svc = 0;
0681 qca->sync = QCASPI_SYNC_UNKNOWN;
0682 qcafrm_fsm_init_spi(&qca->frm_handle);
0683
0684 qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
0685 qca, "%s", dev->name);
0686
0687 if (IS_ERR(qca->spi_thread)) {
0688 netdev_err(dev, "%s: unable to start kernel thread.\n",
0689 QCASPI_DRV_NAME);
0690 return PTR_ERR(qca->spi_thread);
0691 }
0692
0693 ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0,
0694 dev->name, qca);
0695 if (ret) {
0696 netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
0697 QCASPI_DRV_NAME, qca->spi_dev->irq, ret);
0698 kthread_stop(qca->spi_thread);
0699 return ret;
0700 }
0701
0702
0703
0704 return 0;
0705 }
0706
/* ndo_stop handler: stop the queue, mask chip interrupts, release the
 * IRQ, stop the SPI worker thread and drop any queued TX frames.
 */
static int
qcaspi_netdev_close(struct net_device *dev)
{
	struct qcaspi *qca = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Mask interrupts on the chip before freeing our handler. */
	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
	free_irq(qca->spi_dev->irq, qca);

	kthread_stop(qca->spi_thread);
	qca->spi_thread = NULL;
	qcaspi_flush_tx_ring(qca);

	return 0;
}
0723
/* ndo_start_xmit handler: wrap the skb in the QCA7K framing (header,
 * zero padding up to the minimum frame length, footer) and queue it on
 * the TX ring for the SPI worker thread to transmit.
 */
static netdev_tx_t
qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 frame_len;
	u8 *ptmp;
	struct qcaspi *qca = netdev_priv(dev);
	u16 new_tail;
	struct sk_buff *tskb;
	u8 pad_len = 0;

	if (skb->len < QCAFRM_MIN_LEN)
		pad_len = QCAFRM_MIN_LEN - skb->len;

	if (qca->txr.skb[qca->txr.tail]) {
		netdev_warn(qca->net_dev, "queue was unexpectedly full!\n");
		netif_stop_queue(qca->net_dev);
		qca->stats.ring_full++;
		return NETDEV_TX_BUSY;
	}

	/* Reallocate when the skb lacks room for the framing bytes. */
	if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) ||
	    (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) {
		tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN,
				       QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC);
		if (!tskb) {
			qca->stats.out_of_mem++;
			return NETDEV_TX_BUSY;
		}
		dev_kfree_skb(skb);
		skb = tskb;
	}

	/* Length stored in the frame header covers payload plus padding. */
	frame_len = skb->len + pad_len;

	ptmp = skb_push(skb, QCAFRM_HEADER_LEN);
	qcafrm_create_header(ptmp, frame_len);

	if (pad_len) {
		ptmp = skb_put_zero(skb, pad_len);
	}

	ptmp = skb_put(skb, QCAFRM_FOOTER_LEN);
	qcafrm_create_footer(ptmp);

	netdev_dbg(qca->net_dev, "Tx-ing packet: Size: 0x%08x\n",
		   skb->len);

	/* Account the hardware packet header on top of the frame bytes. */
	qca->txr.size += skb->len + QCASPI_HW_PKT_LEN;

	new_tail = qca->txr.tail + 1;
	if (new_tail >= qca->txr.count)
		new_tail = 0;

	qca->txr.skb[qca->txr.tail] = skb;
	qca->txr.tail = new_tail;

	if (!qcaspi_tx_ring_has_space(&qca->txr)) {
		netif_stop_queue(qca->net_dev);
		qca->stats.ring_full++;
	}

	netif_trans_update(dev);

	if (qca->spi_thread)
		wake_up_process(qca->spi_thread);

	return NETDEV_TX_OK;
}
0792
/* ndo_tx_timeout handler: log the stall, force the sync state back to
 * UNKNOWN (which makes the SPI thread flush the ring and re-sync the
 * chip) and wake the thread.
 */
static void
qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qcaspi *qca = netdev_priv(dev);

	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
		    jiffies, jiffies - dev_trans_start(dev));
	qca->net_dev->stats.tx_errors++;

	/* Trigger tx queue flush and QCA7000 reset */
	qca->sync = QCASPI_SYNC_UNKNOWN;

	if (qca->spi_thread)
		wake_up_process(qca->spi_thread);
}
0807
/* ndo_init handler: latch the module parameters into the private data
 * and allocate the RX burst buffer and the initial RX skb.
 * Returns 0 on success or -ENOBUFS on allocation failure.
 */
static int
qcaspi_netdev_init(struct net_device *dev)
{
	struct qcaspi *qca = netdev_priv(dev);

	dev->mtu = QCAFRM_MAX_MTU;
	dev->type = ARPHRD_ETHER;
	qca->clkspeed = qcaspi_clkspeed;
	qca->burst_len = qcaspi_burst_len;
	qca->spi_thread = NULL;
	/* Intermediate buffer for SPI bursts, sized for several framed
	 * frames (the "+ 4" presumably accounts for the hardware packet
	 * header - TODO confirm against QCASPI_HW_PKT_LEN).
	 */
	qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
			    QCAFRM_FOOTER_LEN + 4) * 4;

	memset(&qca->stats, 0, sizeof(struct qcaspi_stats));

	qca->rx_buffer = kmalloc(qca->buffer_size, GFP_KERNEL);
	if (!qca->rx_buffer)
		return -ENOBUFS;

	qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
						VLAN_ETH_HLEN);
	if (!qca->rx_skb) {
		kfree(qca->rx_buffer);
		netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
		return -ENOBUFS;
	}

	return 0;
}
0837
/* ndo_uninit handler: free the resources allocated in
 * qcaspi_netdev_init().
 */
static void
qcaspi_netdev_uninit(struct net_device *dev)
{
	struct qcaspi *qca = netdev_priv(dev);

	kfree(qca->rx_buffer);
	qca->buffer_size = 0;
	dev_kfree_skb(qca->rx_skb);
}
0847
/* Netdev callbacks for the QCA7000 SPI interface. */
static const struct net_device_ops qcaspi_netdev_ops = {
	.ndo_init = qcaspi_netdev_init,
	.ndo_uninit = qcaspi_netdev_uninit,
	.ndo_open = qcaspi_netdev_open,
	.ndo_stop = qcaspi_netdev_close,
	.ndo_start_xmit = qcaspi_netdev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_tx_timeout = qcaspi_netdev_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};
0858
0859 static void
0860 qcaspi_netdev_setup(struct net_device *dev)
0861 {
0862 struct qcaspi *qca = NULL;
0863
0864 dev->netdev_ops = &qcaspi_netdev_ops;
0865 qcaspi_set_ethtool_ops(dev);
0866 dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
0867 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
0868 dev->tx_queue_len = 100;
0869
0870
0871 dev->min_mtu = QCAFRM_MIN_MTU;
0872 dev->max_mtu = QCAFRM_MAX_MTU;
0873
0874 qca = netdev_priv(dev);
0875 memset(qca, 0, sizeof(struct qcaspi));
0876
0877 memset(&qca->txr, 0, sizeof(qca->txr));
0878 qca->txr.count = TX_RING_MAX_LEN;
0879 }
0880
0881 static const struct of_device_id qca_spi_of_match[] = {
0882 { .compatible = "qca,qca7000" },
0883 { }
0884 };
0885 MODULE_DEVICE_TABLE(of, qca_spi_of_match);
0886
0887 static int
0888 qca_spi_probe(struct spi_device *spi)
0889 {
0890 struct qcaspi *qca = NULL;
0891 struct net_device *qcaspi_devs = NULL;
0892 u8 legacy_mode = 0;
0893 u16 signature;
0894 int ret;
0895
0896 if (!spi->dev.of_node) {
0897 dev_err(&spi->dev, "Missing device tree\n");
0898 return -EINVAL;
0899 }
0900
0901 legacy_mode = of_property_read_bool(spi->dev.of_node,
0902 "qca,legacy-mode");
0903
0904 if (qcaspi_clkspeed == 0) {
0905 if (spi->max_speed_hz)
0906 qcaspi_clkspeed = spi->max_speed_hz;
0907 else
0908 qcaspi_clkspeed = QCASPI_CLK_SPEED;
0909 }
0910
0911 if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
0912 (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
0913 dev_err(&spi->dev, "Invalid clkspeed: %d\n",
0914 qcaspi_clkspeed);
0915 return -EINVAL;
0916 }
0917
0918 if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
0919 (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
0920 dev_err(&spi->dev, "Invalid burst len: %d\n",
0921 qcaspi_burst_len);
0922 return -EINVAL;
0923 }
0924
0925 if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
0926 (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
0927 dev_err(&spi->dev, "Invalid pluggable: %d\n",
0928 qcaspi_pluggable);
0929 return -EINVAL;
0930 }
0931
0932 if (wr_verify < QCASPI_WRITE_VERIFY_MIN ||
0933 wr_verify > QCASPI_WRITE_VERIFY_MAX) {
0934 dev_err(&spi->dev, "Invalid write verify: %d\n",
0935 wr_verify);
0936 return -EINVAL;
0937 }
0938
0939 dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
0940 QCASPI_DRV_VERSION,
0941 qcaspi_clkspeed,
0942 qcaspi_burst_len,
0943 qcaspi_pluggable);
0944
0945 spi->mode = SPI_MODE_3;
0946 spi->max_speed_hz = qcaspi_clkspeed;
0947 if (spi_setup(spi) < 0) {
0948 dev_err(&spi->dev, "Unable to setup SPI device\n");
0949 return -EFAULT;
0950 }
0951
0952 qcaspi_devs = alloc_etherdev(sizeof(struct qcaspi));
0953 if (!qcaspi_devs)
0954 return -ENOMEM;
0955
0956 qcaspi_netdev_setup(qcaspi_devs);
0957 SET_NETDEV_DEV(qcaspi_devs, &spi->dev);
0958
0959 qca = netdev_priv(qcaspi_devs);
0960 if (!qca) {
0961 free_netdev(qcaspi_devs);
0962 dev_err(&spi->dev, "Fail to retrieve private structure\n");
0963 return -ENOMEM;
0964 }
0965 qca->net_dev = qcaspi_devs;
0966 qca->spi_dev = spi;
0967 qca->legacy_mode = legacy_mode;
0968
0969 spi_set_drvdata(spi, qcaspi_devs);
0970
0971 ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
0972 if (ret) {
0973 eth_hw_addr_random(qca->net_dev);
0974 dev_info(&spi->dev, "Using random MAC address: %pM\n",
0975 qca->net_dev->dev_addr);
0976 }
0977
0978 netif_carrier_off(qca->net_dev);
0979
0980 if (!qcaspi_pluggable) {
0981 qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
0982 qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
0983
0984 if (signature != QCASPI_GOOD_SIGNATURE) {
0985 dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
0986 signature);
0987 free_netdev(qcaspi_devs);
0988 return -EFAULT;
0989 }
0990 }
0991
0992 if (register_netdev(qcaspi_devs)) {
0993 dev_err(&spi->dev, "Unable to register net device %s\n",
0994 qcaspi_devs->name);
0995 free_netdev(qcaspi_devs);
0996 return -EFAULT;
0997 }
0998
0999 qcaspi_init_device_debugfs(qca);
1000
1001 return 0;
1002 }
1003
/* Remove: tear down debugfs and unregister/free the net device. */
static void
qca_spi_remove(struct spi_device *spi)
{
	struct net_device *ndev = spi_get_drvdata(spi);
	struct qcaspi *qca = netdev_priv(ndev);

	qcaspi_remove_device_debugfs(qca);

	unregister_netdev(ndev);
	free_netdev(ndev);
}
1015
1016 static const struct spi_device_id qca_spi_id[] = {
1017 { "qca7000", 0 },
1018 { }
1019 };
1020 MODULE_DEVICE_TABLE(spi, qca_spi_id);
1021
1022 static struct spi_driver qca_spi_driver = {
1023 .driver = {
1024 .name = QCASPI_DRV_NAME,
1025 .of_match_table = qca_spi_of_match,
1026 },
1027 .id_table = qca_spi_id,
1028 .probe = qca_spi_probe,
1029 .remove = qca_spi_remove,
1030 };
1031 module_spi_driver(qca_spi_driver);
1032
1033 MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
1034 MODULE_AUTHOR("Qualcomm Atheros Communications");
1035 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
1036 MODULE_LICENSE("Dual BSD/GPL");
1037 MODULE_VERSION(QCASPI_DRV_VERSION);