// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_CLASS | \
		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) },
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) },
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context);

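/*
 * Ask the modem for its MAC address by sending an
 * LTE_GET_INFORMATION(MAC_ADDRESS) HCI command on the bulk OUT endpoint.
 * The reply arrives asynchronously and is consumed by set_mac_address()
 * from the RX work.
 */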
static int request_mac_address(struct lte_udev *udev)
{
	struct hci_packet *hci;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret;

	hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
	if (!hci)
		return -ENOMEM;

	hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;
	kfree(hci);

	return ret;
}

static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu;

	t_sdu = kzalloc(sizeof(*t_sdu), GFP_KERNEL);
	if (!t_sdu)
		return NULL;

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
	if (!t_sdu->buf) {
		kfree(t_sdu);
		return NULL;
	}

	return t_sdu;
}

static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

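/*
 * Pop a preallocated SDU buffer from the TX free list. *no_spc is set when
 * this was the last free entry. Callers are expected to hold tx->lock; the
 * same applies to put_tx_struct() below.
 */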
static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}

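/*
 * Tear down everything queued on the TX and RX contexts: pending and free
 * TX buffers, in-flight RX URBs (killed with the submit lock dropped), and
 * RX buffers parked on the free and to_host lists.
 */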
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

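/*
 * Initialize the TX/RX contexts and preallocate MAX_NUM_SDU_BUF TX SDU
 * buffers and MAX_RX_SUBMIT_COUNT * 2 RX buffers. On failure everything
 * already allocated is handed back through release_usb().
 */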
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (!t_sdu) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (!r) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	release_usb(udev);
	return ret;
}

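/*
 * Handle the MAC_ADDRESS TLV of an LTE_GET_INFORMATION_RESULT event and
 * register the LTE network device with that address. Returns 1 when the
 * TLV was consumed here, 0 when it should go to the normal RX callback.
 */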
static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		/* tlv->len comes from the device; never copy more than ETH_ALEN */
		memcpy(mac_address, tlv->data,
		       min_t(size_t, tlv->len, ETH_ALEN));

		if (register_lte_device(phy_dev,
					&udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}

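/*
 * RX work: drain rx->to_host_list, hand each completed buffer to the
 * registered callback (or to set_mac_address() for GET_INFORMATION
 * results), return the buffer to the pool and resubmit a new receive.
 */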
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	int (*cb)(void *cb_data, void *data, int len, int context);
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next,
			       struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = r->cb_data;
		udev = phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				r->callback(r->cb_data,
					    r->buf,
					    r->urb->actual_length,
					    KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		/* don't touch r once it is back on the free list */
		cb = r->callback;
		put_rx_struct(rx, r);

		gdm_usb_recv(udev, cb, phy_dev, USB_COMPLETE);
	}
}

static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next,
				 &rx->rx_submit_list, rx_submit_list) {
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}

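/*
 * Bulk IN completion handler: take the URB off the submit list and either
 * queue the buffer for the RX work or, on error, put it straight back on
 * the free list.
 */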
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		schedule_work(&udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}

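/*
 * Queue one bulk IN transfer on endpoint 0x83. The allocation flags for
 * the URB submission depend on whether we are called from thread context
 * (KERNEL_THREAD) or from a completion path (USB_COMPLETE).
 */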
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}

static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}

static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	/*
	 * Never submit an exact multiple of 512 bytes, presumably so the
	 * device does not wait for a zero-length packet terminating the
	 * bulk transfer; alloc_tx_struct() sized the buffer accordingly.
	 */
	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	if (ret)
		dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n",
			ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

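/*
 * Build an LTE_TX_MULTI_SDU frame in send_buf by concatenating queued SDUs,
 * each padded to a 4-byte boundary, until MAX_PACKET_IN_MULTI_SDU packets
 * or MAX_SDU_SIZE bytes are reached. Returns the total length to transmit.
 */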
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		/* each SDU in the aggregate starts on a 4-byte boundary */
		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}

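/*
 * TX work: wake the device if it is autosuspended, then send either the
 * next pending HCI packet or an aggregated multi-SDU frame. Only one TX
 * URB is in flight at a time, gated by udev->send_complete.
 */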
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	/*
	 * Resume the interface if it was autosuspended; only the wakeup is
	 * needed here, so the PM reference is dropped again right away.
	 */
	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}

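/*
 * Queue one SDU for transmission. For non-ARP traffic the Ethernet header
 * is stripped; SDU_PARAM_LEN covers the dft_eps_ID/bearer_ID/nic_type
 * fields that precede the payload in struct sdu. Returns TX_NO_BUFFER when
 * this used up the last free SDU buffer.
 */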
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dft_eps_ID, unsigned int eps_ID,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (!t_sdu) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(udev->gdm_ed, send_len);
	sdu->dft_eps_ID = gdm_cpu_to_dev32(udev->gdm_ed, dft_eps_ID);
	sdu->bearer_ID = gdm_cpu_to_dev32(udev->gdm_ed, eps_ID);
	sdu->nic_type = gdm_cpu_to_dev32(udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}

static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (!t) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static u8 gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return udev->gdm_ed;
}

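/*
 * Probe: bind only the network interface, set up the TX/RX machinery,
 * enable autosuspend, pick the device endianness by product ID and kick
 * off the MAC address request. The LTE netdev itself is registered later,
 * when the MAC address reply comes back (see set_mac_address()).
 */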
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* the GDM7243 uses big-endian HCI framing, everything else little */
	if (idProduct == PID_GDM7243)
		udev->gdm_ed = ENDIANNESS_BIG;
	else
		udev->gdm_ed = ENDIANNESS_LITTLE;

	ret = request_mac_address(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "request MAC address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}

static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);
	phy_dev = usb_get_intfdata(intf);

	udev = phy_dev->priv_dev;
	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}

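/*
 * Runtime/system suspend: mark the device PM_SUSPEND, kill all in-flight
 * RX URBs and flush the TX/RX work items.
 */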
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	cancel_work_sync(&udev->work_tx.work);
	cancel_work_sync(&udev->work_rx.work);

	return 0;
}

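/*
 * Resume: mark the device PM_NORMAL again, resubmit receive URBs while
 * keeping MAX_RX_SUBMIT_COUNT buffers in reserve, and reschedule the TX
 * work.
 */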
static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};

static int __init gdm_usb_lte_init(void)
{
	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	return usb_register(&gdm_usb_lte_driver);
}

static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);
}

module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");