0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/atomic.h>
0014 #include <linux/clk.h>
0015 #include <linux/device.h>
0016 #include <linux/err.h>
0017 #include <linux/gpio.h>
0018 #include <linux/if_ether.h>
0019 #include <linux/if_arp.h>
0020 #include <linux/if_phonet.h>
0021 #include <linux/init.h>
0022 #include <linux/irq.h>
0023 #include <linux/list.h>
0024 #include <linux/module.h>
0025 #include <linux/netdevice.h>
0026 #include <linux/notifier.h>
0027 #include <linux/scatterlist.h>
0028 #include <linux/skbuff.h>
0029 #include <linux/slab.h>
0030 #include <linux/spinlock.h>
0031 #include <linux/timer.h>
0032 #include <linux/hsi/hsi.h>
0033 #include <linux/hsi/ssi_protocol.h>
0034
0035 void ssi_waketest(struct hsi_client *cl, unsigned int enable);
0036
0037 #define SSIP_TXQUEUE_LEN 100
0038 #define SSIP_MAX_MTU 65535
0039 #define SSIP_DEFAULT_MTU 4000
0040 #define PN_MEDIA_SOS 21
0041 #define SSIP_MIN_PN_HDR 6
0042 #define SSIP_WDTOUT 2000
0043 #define SSIP_KATOUT 15
0044 #define SSIP_MAX_CMDS 5
0045 #define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
0046 #define SSIP_CMT_LOADER_SYNC 0x11223344
0047
0048
0049
0050 #define SSIP_COMMAND(data) ((data) >> 28)
0051 #define SSIP_PAYLOAD(data) ((data) & 0xfffffff)
0052
0053 #define SSIP_SW_BREAK 0
0054 #define SSIP_BOOTINFO_REQ 1
0055 #define SSIP_BOOTINFO_RESP 2
0056 #define SSIP_WAKETEST_RESULT 3
0057 #define SSIP_START_TRANS 4
0058 #define SSIP_READY 5
0059
0060 #define SSIP_DATA_VERSION(data) ((data) & 0xff)
0061 #define SSIP_LOCAL_VERID 1
0062 #define SSIP_WAKETEST_OK 0
0063 #define SSIP_WAKETEST_FAILED 1
0064 #define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff)
0065 #define SSIP_MSG_ID(data) ((data) & 0xff)
0066
0067 #define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff))
0068
0069 #define SSIP_BOOTINFO_REQ_CMD(ver) \
0070 SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
0071 #define SSIP_BOOTINFO_RESP_CMD(ver) \
0072 SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
0073 #define SSIP_START_TRANS_CMD(pdulen, id) \
0074 SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
0075 #define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0)
0076 #define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0)
0077
0078 #define SSIP_WAKETEST_FLAG 0
0079
0080
/* Main (link) state machine states */
enum {
	INIT,		/* Reset state: no handshake done yet */
	HANDSHAKE,	/* BOOT INFO exchange / wake line test in progress */
	ACTIVE,		/* Handshake completed, data transfers allowed */
};
0086
0087
/* TX state machine states */
enum {
	SEND_IDLE,	/* Nothing to send, wake line may be dropped */
	WAIT4READY,	/* TX pending, waiting for the peer's READY command */
	SEND_READY,	/* Peer signalled READY, transfer may start */
	SENDING,	/* Data transfer ongoing */
	SENDING_SWBREAK, /* Trailing SW BREAK command being sent */
};
0095
0096
/* RX state machine states */
enum {
	RECV_IDLE,	/* No reception ongoing */
	RECV_READY,	/* READY sent to the peer, waiting for START TRANS */
	RECEIVING,	/* Data transfer ongoing */
};
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
/**
 * struct ssi_protocol - SSI protocol (McSAAB) instance data
 * @main_state: Main (link) state machine state
 * @send_state: TX state machine state
 * @recv_state: RX state machine state
 * @flags: Bit flags, currently only SSIP_WAKETEST_FLAG
 * @rxid: Expected id of the next incoming START TRANS command
 * @txid: Id for the next outgoing START TRANS command
 * @txqueue_len: Number of data messages queued in @txqueue
 * @tx_wd: TX watchdog timer
 * @rx_wd: RX watchdog timer
 * @keep_alive: Keep-alive timer, rearmed while the link is busy
 * @lock: Serializes access to the state machines and queues
 * @netdev: Phonet network device
 * @txqueue: Queue of pending TX data messages
 * @cmdqueue: Pool of free one-word command messages
 * @work: Deferred ssip_xmit() work (see ssip_xmit_work())
 * @cl: Own HSI client reference
 * @link: Entry in the global ssip_list
 * @tx_usecnt: Count of slave clients holding TX active
 * @channel_id_cmd: HSI channel id for commands
 * @channel_id_data: HSI channel id for data
 */
struct ssi_protocol {
	unsigned int		main_state;
	unsigned int		send_state;
	unsigned int		recv_state;
	unsigned long		flags;
	u8			rxid;
	u8			txid;
	unsigned int		txqueue_len;
	struct timer_list	tx_wd;
	struct timer_list	rx_wd;
	struct timer_list	keep_alive;
	spinlock_t		lock;
	struct net_device	*netdev;
	struct list_head	txqueue;
	struct list_head	cmdqueue;
	struct work_struct	work;
	struct hsi_client	*cl;
	struct list_head	link;
	atomic_t		tx_usecnt;
	int			channel_id_cmd;
	int			channel_id_data;
};
0147
0148
0149 static LIST_HEAD(ssip_list);
0150
0151 static void ssip_rxcmd_complete(struct hsi_msg *msg);
0152
0153 static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
0154 {
0155 u32 *data;
0156
0157 data = sg_virt(msg->sgt.sgl);
0158 *data = cmd;
0159 }
0160
0161 static inline u32 ssip_get_cmd(struct hsi_msg *msg)
0162 {
0163 u32 *data;
0164
0165 data = sg_virt(msg->sgt.sgl);
0166
0167 return *data;
0168 }
0169
/*
 * Map an sk_buff onto the scatterlist of an HSI message: the linear
 * head goes into the first entry, each page fragment into the
 * following ones. The message must have been allocated with exactly
 * nr_frags + 1 scatterlist entries.
 */
static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	/* Linear part first */
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	/* Then one entry per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
				skb_frag_off(frag));
	}
}
0188
0189 static void ssip_free_data(struct hsi_msg *msg)
0190 {
0191 struct sk_buff *skb;
0192
0193 skb = msg->context;
0194 pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
0195 skb);
0196 msg->destructor = NULL;
0197 dev_kfree_skb(skb);
0198 hsi_free_msg(msg);
0199 }
0200
0201 static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
0202 struct sk_buff *skb, gfp_t flags)
0203 {
0204 struct hsi_msg *msg;
0205
0206 msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
0207 if (!msg)
0208 return NULL;
0209 ssip_skb_to_msg(skb, msg);
0210 msg->destructor = ssip_free_data;
0211 msg->channel = ssi->channel_id_data;
0212 msg->context = skb;
0213
0214 return msg;
0215 }
0216
0217 static inline void ssip_release_cmd(struct hsi_msg *msg)
0218 {
0219 struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);
0220
0221 dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
0222 spin_lock_bh(&ssi->lock);
0223 list_add_tail(&msg->link, &ssi->cmdqueue);
0224 spin_unlock_bh(&ssi->lock);
0225 }
0226
0227 static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
0228 {
0229 struct hsi_msg *msg;
0230
0231 BUG_ON(list_empty(&ssi->cmdqueue));
0232
0233 spin_lock_bh(&ssi->lock);
0234 msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
0235 list_del(&msg->link);
0236 spin_unlock_bh(&ssi->lock);
0237 msg->destructor = ssip_release_cmd;
0238
0239 return msg;
0240 }
0241
0242 static void ssip_free_cmds(struct ssi_protocol *ssi)
0243 {
0244 struct hsi_msg *msg, *tmp;
0245
0246 list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
0247 list_del(&msg->link);
0248 msg->destructor = NULL;
0249 kfree(sg_virt(msg->sgt.sgl));
0250 hsi_free_msg(msg);
0251 }
0252 }
0253
0254 static int ssip_alloc_cmds(struct ssi_protocol *ssi)
0255 {
0256 struct hsi_msg *msg;
0257 u32 *buf;
0258 unsigned int i;
0259
0260 for (i = 0; i < SSIP_MAX_CMDS; i++) {
0261 msg = hsi_alloc_msg(1, GFP_KERNEL);
0262 if (!msg)
0263 goto out;
0264 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
0265 if (!buf) {
0266 hsi_free_msg(msg);
0267 goto out;
0268 }
0269 sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
0270 msg->channel = ssi->channel_id_cmd;
0271 list_add_tail(&msg->link, &ssi->cmdqueue);
0272 }
0273
0274 return 0;
0275 out:
0276 ssip_free_cmds(ssi);
0277
0278 return -ENOMEM;
0279 }
0280
/*
 * Enter RX state @state and (re)arm or stop the RX watchdog and
 * keep-alive timers to match. All callers hold ssi->lock.
 */
static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		del_timer(&ssi->rx_wd);
		/* Keep-alive stops only once both directions are idle */
		if (ssi->send_state == SEND_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* Leave the timers alone while TX users hold the link up */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		fallthrough;
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
						msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
0304
/*
 * Enter TX state @state and (re)arm or stop the TX watchdog and
 * keep-alive timers to match. All callers hold ssi->lock.
 */
static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		del_timer(&ssi->tx_wd);
		/* Keep-alive stops only once both directions are idle */
		if (ssi->recv_state == RECV_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
				jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
0326
0327 struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
0328 {
0329 struct hsi_client *master = ERR_PTR(-ENODEV);
0330 struct ssi_protocol *ssi;
0331
0332 list_for_each_entry(ssi, &ssip_list, link)
0333 if (slave->device.parent == ssi->cl->device.parent) {
0334 master = ssi->cl;
0335 break;
0336 }
0337
0338 return master;
0339 }
0340 EXPORT_SYMBOL_GPL(ssip_slave_get_master);
0341
/*
 * ssip_slave_start_tx - take a TX reference on behalf of a slave client
 * @master: SSI protocol client
 *
 * Raises the wake line (hsi_start_tx) if TX was idle and counts the
 * user in tx_usecnt. Balanced by ssip_slave_stop_tx().
 */
int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);
0358
/*
 * ssip_slave_stop_tx - drop a TX reference taken by ssip_slave_start_tx()
 * @master: SSI protocol client
 *
 * When the last user goes away and no transfer is in flight
 * (SEND_READY or WAIT4READY), the TX side is idled and the wake line
 * dropped.
 */
int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
			(ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);
0379
0380 int ssip_slave_running(struct hsi_client *master)
0381 {
0382 struct ssi_protocol *ssi = hsi_client_drvdata(master);
0383 return netif_running(ssi->netdev);
0384 }
0385 EXPORT_SYMBOL_GPL(ssip_slave_running);
0386
/*
 * Reset the protocol to its initial state: flush the port, stop TX,
 * cancel the wake line test, stop all timers, zero the state machines
 * and drop every queued TX data message.
 */
static void ssip_reset(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	if (netif_running(ssi->netdev))
		netif_carrier_off(ssi->netdev);
	hsi_flush(cl);
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state != SEND_IDLE)
		hsi_stop_tx(cl);
	spin_unlock_bh(&ssi->lock);
	/* Lock dropped: ssi_waketest() is called outside the spinlock */
	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0);
	spin_lock_bh(&ssi->lock);
	del_timer(&ssi->rx_wd);
	del_timer(&ssi->tx_wd);
	del_timer(&ssi->keep_alive);
	/* Back to INIT / SEND_IDLE / RECV_IDLE (all zero) */
	ssi->main_state = 0;
	ssi->send_state = 0;
	ssi->recv_state = 0;
	ssi->flags = 0;
	ssi->rxid = 0;
	ssi->txid = 0;
	list_for_each_safe(head, tmp, &ssi->txqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		dev_dbg(&cl->device, "Pending TX data\n");
		list_del(head);
		ssip_free_data(msg);
	}
	ssi->txqueue_len = 0;
	spin_unlock_bh(&ssi->lock);
}
0421
/* Dump the protocol state to the kernel log for error diagnosis. */
static void ssip_dump_state(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	spin_lock_bh(&ssi->lock);
	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
							"Online" : "Offline");
	dev_err(&cl->device, "Wake test %d\n",
				test_bit(SSIP_WAKETEST_FLAG, &ssi->flags));
	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);

	list_for_each_entry(msg, &ssi->txqueue, link)
		dev_err(&cl->device, "pending TX data (%p)\n", msg);
	spin_unlock_bh(&ssi->lock);
}
0442
0443 static void ssip_error(struct hsi_client *cl)
0444 {
0445 struct ssi_protocol *ssi = hsi_client_drvdata(cl);
0446 struct hsi_msg *msg;
0447
0448 ssip_dump_state(cl);
0449 ssip_reset(cl);
0450 msg = ssip_claim_cmd(ssi);
0451 msg->complete = ssip_rxcmd_complete;
0452 hsi_async_read(cl, msg);
0453 }
0454
/*
 * Keep-alive timer callback: rearms itself every SSIP_KATOUT ms while
 * the link is busy. It stops rearming only when RX is idle and TX is
 * either idle or ready with no remaining TX users.
 */
static void ssip_keep_alive(struct timer_list *t)
{
	struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive);
	struct hsi_client *cl = ssi->cl;

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			/* Still users around: keep the timer running */
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			fallthrough;
		case SEND_IDLE:
			/* Fully idle: let the timer die out */
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}
0481
0482 static void ssip_rx_wd(struct timer_list *t)
0483 {
0484 struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd);
0485 struct hsi_client *cl = ssi->cl;
0486
0487 dev_err(&cl->device, "Watchdog triggered\n");
0488 ssip_error(cl);
0489 }
0490
0491 static void ssip_tx_wd(struct timer_list *t)
0492 {
0493 struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd);
0494 struct hsi_client *cl = ssi->cl;
0495
0496 dev_err(&cl->device, "Watchdog triggered\n");
0497 ssip_error(cl);
0498 }
0499
0500 static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
0501 {
0502 struct ssi_protocol *ssi = hsi_client_drvdata(cl);
0503 struct hsi_msg *msg;
0504
0505 dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
0506 msg = ssip_claim_cmd(ssi);
0507 ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
0508 msg->complete = ssip_release_cmd;
0509 hsi_async_write(cl, msg);
0510 dev_dbg(&cl->device, "Issuing RX command\n");
0511 msg = ssip_claim_cmd(ssi);
0512 msg->complete = ssip_rxcmd_complete;
0513 hsi_async_read(cl, msg);
0514 }
0515
/*
 * Peer raised its wake line: if the link is ACTIVE and we are not
 * already RECV_READY, enter RECV_READY and answer with a READY
 * command so the peer may start its transfer.
 */
static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
							ssi->recv_state);
	spin_lock_bh(&ssi->lock);
	/*
	 * Ignore the event while the handshake is not finished or if a
	 * READY has already been sent for this wake cycle.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}
0541
0542 static void ssip_stop_rx(struct hsi_client *cl)
0543 {
0544 struct ssi_protocol *ssi = hsi_client_drvdata(cl);
0545
0546 dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
0547 spin_lock_bh(&ssi->lock);
0548 if (likely(ssi->main_state == ACTIVE))
0549 ssip_set_rxstate(ssi, RECV_IDLE);
0550 spin_unlock_bh(&ssi->lock);
0551 }
0552
0553 static void ssip_free_strans(struct hsi_msg *msg)
0554 {
0555 ssip_free_data(msg->context);
0556 ssip_release_cmd(msg);
0557 }
0558
/*
 * START TRANS command completed on the wire: recycle the command,
 * mark TX as SENDING and push the actual data message it announced.
 */
static void ssip_strans_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *data;

	data = msg->context;
	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);
	hsi_async_write(cl, data);
}
0572
/*
 * Dequeue the next pending TX data message and announce it with a
 * START TRANS command carrying the frame count and a running id.
 * Returns 0 when the queue is empty, otherwise the result of queuing
 * the command write.
 */
static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	/* Tie the data message to the command for completion/cleanup */
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
							ssi->txid));
	/* 8-bit id wraps naturally; peer checks it in ssip_rx_strans() */
	ssi->txid++;
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);

	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
					SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}
0607
0608
/*
 * Deliver a received PDU to the Phonet stack. Drops the packet if the
 * interface is down or the header is shorter than SSIP_MIN_PN_HDR.
 * The third 16-bit word (Phonet length field) is byte-swapped in
 * place — ssip_pn_xmit() performs the matching swap on transmit;
 * presumably the modem uses the opposite byte order (verify against
 * the wire format).
 */
static void ssip_pn_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (unlikely(!netif_running(dev))) {
		dev_dbg(&dev->dev, "Drop RX packet\n");
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
		dev_dbg(&dev->dev, "Error drop RX packet\n");
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* Fix the length field byte order for the host */
	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
	dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
			((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));

	skb->protocol = htons(ETH_P_PHONET);
	skb_reset_mac_header(skb);
	/* Strip the one-byte media/address prefix before handing up */
	__skb_pull(skb, 1);
	netif_rx(skb);
}
0639
/*
 * A data transfer finished: stop the RX watchdog and push the skb to
 * the Phonet stack, or reset the protocol on transfer error.
 */
static void ssip_rx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX data error\n");
		ssip_free_data(msg);
		ssip_error(cl);
		return;
	}
	del_timer(&ssi->rx_wd);
	skb = msg->context;
	ssip_pn_rx(skb);
	/* skb ownership moved to the stack: free only the message */
	hsi_free_msg(msg);
}
0657
/*
 * Handle a BOOT INFO REQ from the peer: enter HANDSHAKE, start the
 * wake line test and answer with BOOT INFO RESP. A request received
 * while ACTIVE is treated as an error (peer restarted) before
 * re-entering the handshake.
 */
static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Ignore the CMT loader sync pattern, it is not a real request */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		fallthrough;
	case INIT:
	case HANDSHAKE:
		spin_lock_bh(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		spin_unlock_bh(&ssi->lock);

		/* Start the wake line test once per handshake */
		if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
			ssi_waketest(cl, 1);

		spin_lock_bh(&ssi->lock);
		/* Watchdog the handshake completion */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}
0698
/*
 * Handle a BOOT INFO RESP from the peer: while the handshake is not
 * done, rearm the TX watchdog to cover the remaining handshake steps;
 * once ACTIVE the response is ignored.
 */
static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
		dev_warn(&cl->device, "boot info resp verid mismatch\n");

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != ACTIVE)
		/* Use the TX watchdog to time out the handshake */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
	else
		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
							ssi->main_state);
	spin_unlock_bh(&ssi->lock);
}
0715
/*
 * Handle the WAKE TEST RESULT command that concludes the handshake:
 * stop the wake line test, enter ACTIVE and bring the Phonet
 * interface online — or reset the protocol if the peer reports the
 * test failed.
 */
static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
							ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	spin_unlock_bh(&ssi->lock);

	/* Lock dropped: ssi_waketest() is called outside the spinlock */
	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0);

	spin_lock_bh(&ssi->lock);
	ssi->main_state = ACTIVE;
	/* Handshake done: stop the handshake watchdog */
	del_timer(&ssi->tx_wd);
	spin_unlock_bh(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
		wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}
0748
/*
 * Handle a READY command from the peer: if we were waiting for it
 * (WAIT4READY), move to SEND_READY and start transmitting the queue.
 * Spurious or out-of-state READY commands are ignored.
 */
static void ssip_rx_ready(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	if (ssi->send_state != WAIT4READY) {
		dev_dbg(&cl->device, "Ignore spurious READY command\n");
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_txstate(ssi, SEND_READY);
	spin_unlock_bh(&ssi->lock);
	ssip_xmit(cl);
}
0769
/*
 * Handle a START TRANS command announcing an incoming data transfer
 * of @len 32-bit frames: verify the running message id, allocate an
 * skb of len * 4 bytes and queue the data read. Any failure resets
 * the protocol.
 */
static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	/* The peer must use sequential ids (see ssip_xmit()) */
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
					SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock_bh(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;
	spin_unlock_bh(&ssi->lock);
	/* len is in 32-bit frames */
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb->dev = ssi->netdev;
	skb_put(skb, len * 4);
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}
0815
/*
 * Completion handler for command reads: dispatch the received command
 * word to its handler. The same message is immediately re-queued with
 * hsi_async_read() so the next command can be received while this one
 * is being processed.
 */
static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	/* Re-arm the command read before dispatching */
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored: SW break carries no action on RX */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}
0854
/*
 * The trailing SW BREAK went out. If more data was queued meanwhile,
 * keep transmitting; otherwise idle the TX side (keeping SEND_READY
 * while slave TX users remain) and wake the netdev queue.
 */
static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			/* Slave users still hold TX: stay ready */
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock_bh(&ssi->lock);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}
0876
/*
 * A TX data transfer finished. On success, either send the SW BREAK
 * that terminates the burst (queue empty) or transmit the next queued
 * message. On error, reset the protocol. The data message is freed in
 * all cases.
 */
static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock_bh(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}
0904
0905 static void ssip_port_event(struct hsi_client *cl, unsigned long event)
0906 {
0907 switch (event) {
0908 case HSI_EVENT_START_RX:
0909 ssip_start_rx(cl);
0910 break;
0911 case HSI_EVENT_STOP_RX:
0912 ssip_stop_rx(cl);
0913 break;
0914 default:
0915 return;
0916 }
0917 }
0918
0919 static int ssip_pn_open(struct net_device *dev)
0920 {
0921 struct hsi_client *cl = to_hsi_client(dev->dev.parent);
0922 struct ssi_protocol *ssi = hsi_client_drvdata(cl);
0923 int err;
0924
0925 err = hsi_claim_port(cl, 1);
0926 if (err < 0) {
0927 dev_err(&cl->device, "SSI port already claimed\n");
0928 return err;
0929 }
0930 err = hsi_register_port_event(cl, ssip_port_event);
0931 if (err < 0) {
0932 dev_err(&cl->device, "Register HSI port event failed (%d)\n",
0933 err);
0934 return err;
0935 }
0936 dev_dbg(&cl->device, "Configuring SSI port\n");
0937 hsi_setup(cl);
0938
0939 if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
0940 ssi_waketest(cl, 1);
0941
0942 spin_lock_bh(&ssi->lock);
0943 ssi->main_state = HANDSHAKE;
0944 spin_unlock_bh(&ssi->lock);
0945
0946 ssip_send_bootinfo_req_cmd(cl);
0947
0948 return 0;
0949 }
0950
/*
 * ssip_pn_stop - Phonet netdevice stop handler
 * @dev: Phonet network device
 *
 * Resets the protocol, then unregisters the port event handler and
 * releases the HSI port claimed in ssip_pn_open().
 */
static int ssip_pn_stop(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);

	ssip_reset(cl);
	hsi_unregister_port_event(cl);
	hsi_release_port(cl);

	return 0;
}
0961
0962 static void ssip_xmit_work(struct work_struct *work)
0963 {
0964 struct ssi_protocol *ssi =
0965 container_of(work, struct ssi_protocol, work);
0966 struct hsi_client *cl = ssi->cl;
0967
0968 ssip_xmit(cl);
0969 }
0970
/*
 * ssip_pn_xmit - Phonet ndo_start_xmit handler
 * @skb: outgoing Phonet packet
 * @dev: Phonet network device
 *
 * Queues the packet for transmission over the SSI data channel and
 * starts/continues the TX state machine. Always returns 0; dropped
 * packets are accounted in tx_dropped.
 */
static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	if ((skb->protocol != htons(ETH_P_PHONET)) ||
					(skb->len < SSIP_MIN_PN_HDR))
		goto drop;
	/* SSI transfers 32-bit frames: pad to a word multiple */
	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
		goto inc_dropped;

	/* The header is modified below: make sure it is writable */
	if (skb_cow_head(skb, 0))
		goto drop;

	/*
	 * Swap the byte order of the 16-bit Phonet length field for the
	 * wire; ssip_pn_rx() performs the matching swap on receive.
	 */
	((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);

	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (!msg) {
		dev_dbg(&cl->device, "Dropping tx data: No memory\n");
		goto drop;
	}
	msg->complete = ssip_tx_data_complete;

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
		goto drop2;
	}
	list_add_tail(&msg->link, &ssi->txqueue);
	ssi->txqueue_len++;
	if (dev->tx_queue_len < ssi->txqueue_len) {
		dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
		netif_stop_queue(dev);
	}
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
		hsi_start_tx(cl);
	} else if (ssi->send_state == SEND_READY) {
		/* Transmission deferred to ssip_xmit_work() */
		dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
							ssi->txqueue_len);
		spin_unlock_bh(&ssi->lock);
		schedule_work(&ssi->work);
	} else {
		spin_unlock_bh(&ssi->lock);
	}
	/*
	 * NOTE(review): the skb has already been handed to the TX path
	 * above; reading skb->len here could race with the completion
	 * path freeing it — confirm and consider caching len earlier.
	 */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	return 0;
drop2:
	hsi_free_msg(msg);
drop:
	dev_kfree_skb(skb);
inc_dropped:
	dev->stats.tx_dropped++;

	return 0;
}
1040
1041
1042 void ssip_reset_event(struct hsi_client *master)
1043 {
1044 struct ssi_protocol *ssi = hsi_client_drvdata(master);
1045 dev_err(&ssi->cl->device, "CMT reset detected!\n");
1046 ssip_error(ssi->cl);
1047 }
1048 EXPORT_SYMBOL_GPL(ssip_reset_event);
1049
/* Phonet netdevice operations */
static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
};
1055
/*
 * Netdevice setup callback for alloc_netdev(): configure the device
 * as a point-to-point Phonet interface with a one-byte address.
 */
static void ssip_pn_setup(struct net_device *dev)
{
	static const u8 addr = PN_MEDIA_SOS;

	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->addr_len		= 1;
	dev_addr_set(dev, &addr);
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;

	dev->needs_free_netdev	= true;
	dev->header_ops		= &phonet_header_ops;
}
1073
/*
 * ssi_protocol_probe - bind the protocol to an HSI client
 * @dev: device of the HSI client being probed
 *
 * Allocates and initializes the protocol instance, resolves the
 * command and data channel ids, pre-allocates the command pool and
 * registers the Phonet netdevice.
 *
 * Return: 0 on success or a negative errno (all partial allocations
 * are rolled back via the goto cleanup chain).
 */
static int ssi_protocol_probe(struct device *dev)
{
	static const char ifname[] = "phonet%d";
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi;
	int err;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi)
		return -ENOMEM;

	spin_lock_init(&ssi->lock);
	timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->keep_alive, ssip_keep_alive, 0);
	INIT_LIST_HEAD(&ssi->txqueue);
	INIT_LIST_HEAD(&ssi->cmdqueue);
	atomic_set(&ssi->tx_usecnt, 0);
	hsi_client_set_drvdata(cl, ssi);
	ssi->cl = cl;
	INIT_WORK(&ssi->work, ssip_xmit_work);

	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
	if (ssi->channel_id_cmd < 0) {
		err = ssi->channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		goto out;
	}

	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
	if (ssi->channel_id_data < 0) {
		err = ssi->channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		goto out;
	}

	err = ssip_alloc_cmds(ssi);
	if (err < 0) {
		dev_err(dev, "No memory for commands\n");
		goto out;
	}

	ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
	if (!ssi->netdev) {
		dev_err(dev, "No memory for netdev\n");
		err = -ENOMEM;
		goto out1;
	}

	/* MTU range: 6 - 65535 */
	ssi->netdev->min_mtu = PHONET_MIN_MTU;
	ssi->netdev->max_mtu = SSIP_MAX_MTU;

	SET_NETDEV_DEV(ssi->netdev, dev);
	netif_carrier_off(ssi->netdev);
	err = register_netdev(ssi->netdev);
	if (err < 0) {
		dev_err(dev, "Register netdev failed (%d)\n", err);
		goto out2;
	}

	list_add(&ssi->link, &ssip_list);

	dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
				ssi->channel_id_cmd, ssi->channel_id_data);

	return 0;
out2:
	free_netdev(ssi->netdev);
out1:
	ssip_free_cmds(ssi);
out:
	kfree(ssi);

	return err;
}
1150
/*
 * ssi_protocol_remove - unbind the protocol from an HSI client
 * @dev: device of the HSI client being removed
 *
 * Tears down everything set up in ssi_protocol_probe(); the netdev
 * itself is freed via needs_free_netdev.
 */
static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}
1164
/* HSI client driver glue */
static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};
1173
/* Module init: register the HSI client driver. */
static int __init ssip_init(void)
{
	pr_info("SSI protocol aka McSAAB added\n");

	return hsi_register_client_driver(&ssip_driver);
}
module_init(ssip_init);
1181
/* Module exit: unregister the HSI client driver. */
static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);
1188
1189 MODULE_ALIAS("hsi:ssi-protocol");
1190 MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
1191 MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
1192 MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
1193 MODULE_LICENSE("GPL");