// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm BAM-DMUX WWAN network driver
 * Copyright (c) Stephan Gerhold <stephan@gerhold.net>
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/pkt_sched.h>

#define BAM_DMUX_BUFFER_SIZE		SZ_2K
#define BAM_DMUX_HDR_SIZE		sizeof(struct bam_dmux_hdr)
#define BAM_DMUX_MAX_DATA_SIZE		(BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)
#define BAM_DMUX_NUM_SKB		32

#define BAM_DMUX_HDR_MAGIC		0x33fc

#define BAM_DMUX_AUTOSUSPEND_DELAY	1000
#define BAM_DMUX_REMOTE_TIMEOUT		msecs_to_jiffies(2000)

enum {
	BAM_DMUX_CMD_DATA,
	BAM_DMUX_CMD_OPEN,
	BAM_DMUX_CMD_CLOSE,
};

enum {
	BAM_DMUX_CH_DATA_0,
	BAM_DMUX_CH_DATA_1,
	BAM_DMUX_CH_DATA_2,
	BAM_DMUX_CH_DATA_3,
	BAM_DMUX_CH_DATA_4,
	BAM_DMUX_CH_DATA_5,
	BAM_DMUX_CH_DATA_6,
	BAM_DMUX_CH_DATA_7,
	BAM_DMUX_NUM_CH
};

struct bam_dmux_hdr {
	u16 magic;
	u8 signal;
	u8 cmd;
	u8 pad;
	u8 ch;
	u16 len;
};

struct bam_dmux_skb_dma {
	struct bam_dmux *dmux;
	struct sk_buff *skb;
	dma_addr_t addr;
};

struct bam_dmux {
	struct device *dev;

	int pc_irq;
	bool pc_state, pc_ack_state;
	struct qcom_smem_state *pc, *pc_ack;
	u32 pc_mask, pc_ack_mask;
	wait_queue_head_t pc_wait;
	struct completion pc_ack_completion;

	struct dma_chan *rx, *tx;
	struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB];
	struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB];
	spinlock_t tx_lock;
	unsigned int tx_next_skb;
	atomic_long_t tx_deferred_skb;
	struct work_struct tx_wakeup_work;

	DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH);
	struct work_struct register_netdev_work;
	struct net_device *netdevs[BAM_DMUX_NUM_CH];
};

struct bam_dmux_netdev {
	struct bam_dmux *dmux;
	u8 ch;
};

static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable)
{
	reinit_completion(&dmux->pc_ack_completion);
	qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
				    enable ? dmux->pc_mask : 0);
}

static void bam_dmux_pc_ack(struct bam_dmux *dmux)
{
	qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
				    dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
	dmux->pc_ack_state = !dmux->pc_ack_state;
}

static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma,
				 enum dma_data_direction dir)
{
	struct device *dev = skb_dma->dmux->dev;

	skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
	if (dma_mapping_error(dev, skb_dma->addr)) {
		dev_err(dev, "Failed to DMA map buffer\n");
		skb_dma->addr = 0;
		return false;
	}

	return true;
}

static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma,
				   enum dma_data_direction dir)
{
	dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
	skb_dma->addr = 0;
}

static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux)
{
	int i;

	dev_dbg(dmux->dev, "wake queues\n");

	for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
		struct net_device *netdev = dmux->netdevs[i];

		if (netdev && netif_running(netdev))
			netif_wake_queue(netdev);
	}
}

static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux)
{
	int i;

	dev_dbg(dmux->dev, "stop queues\n");

	for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
		struct net_device *netdev = dmux->netdevs[i];

		if (netdev)
			netif_stop_queue(netdev);
	}
}

static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	unsigned long flags;

	pm_runtime_mark_last_busy(dmux->dev);
	pm_runtime_put_autosuspend(dmux->dev);

	if (skb_dma->addr)
		bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE);

	spin_lock_irqsave(&dmux->tx_lock, flags);
	skb_dma->skb = NULL;
	if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
		bam_dmux_tx_wake_queues(dmux);
	spin_unlock_irqrestore(&dmux->tx_lock, flags);
}

static void bam_dmux_tx_callback(void *data)
{
	struct bam_dmux_skb_dma *skb_dma = data;
	struct sk_buff *skb = skb_dma->skb;

	bam_dmux_tx_done(skb_dma);
	dev_consume_skb_any(skb);
}

static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
					   skb_dma->skb->len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");
		return false;
	}

	desc->callback = bam_dmux_tx_callback;
	desc->callback_param = skb_dma;
	desc->cookie = dmaengine_submit(desc);
	return true;
}

static struct bam_dmux_skb_dma *
bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb)
{
	struct bam_dmux_skb_dma *skb_dma;
	unsigned long flags;

	spin_lock_irqsave(&dmux->tx_lock, flags);

	skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
	if (skb_dma->skb) {
		bam_dmux_tx_stop_queues(dmux);
		spin_unlock_irqrestore(&dmux->tx_lock, flags);
		return NULL;
	}
	skb_dma->skb = skb;

	dmux->tx_next_skb++;
	if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)
		bam_dmux_tx_stop_queues(dmux);

	spin_unlock_irqrestore(&dmux->tx_lock, flags);
	return skb_dma;
}

static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd)
{
	struct bam_dmux *dmux = bndev->dmux;
	struct bam_dmux_skb_dma *skb_dma;
	struct bam_dmux_hdr *hdr;
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(sizeof(*hdr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put_zero(skb, sizeof(*hdr));
	hdr->magic = BAM_DMUX_HDR_MAGIC;
	hdr->cmd = cmd;
	hdr->ch = bndev->ch;

	skb_dma = bam_dmux_tx_queue(dmux, skb);
	if (!skb_dma) {
		ret = -EAGAIN;
		goto free_skb;
	}

	ret = pm_runtime_get_sync(dmux->dev);
	if (ret < 0)
		goto tx_fail;

	if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) {
		ret = -ENOMEM;
		goto tx_fail;
	}

	if (!bam_dmux_skb_dma_submit_tx(skb_dma)) {
		ret = -EIO;
		goto tx_fail;
	}

	dma_async_issue_pending(dmux->tx);
	return 0;

tx_fail:
	bam_dmux_tx_done(skb_dma);
free_skb:
	dev_kfree_skb(skb);
	return ret;
}

static int bam_dmux_netdev_open(struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
	int ret;

	ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN);
	if (ret)
		return ret;

	netif_start_queue(netdev);
	return 0;
}

static int bam_dmux_netdev_stop(struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE);
	return 0;
}

static unsigned int needed_room(unsigned int avail, unsigned int needed)
{
	if (avail >= needed)
		return 0;
	return needed - avail;
}

static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev,
				   struct sk_buff *skb)
{
	unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE);
	unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);
	unsigned int tail = needed_room(skb_tailroom(skb), pad);
	struct bam_dmux_hdr *hdr;
	int ret;

	if (head || tail || skb_cloned(skb)) {
		ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC);
		if (ret)
			return ret;
	}

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->magic = BAM_DMUX_HDR_MAGIC;
	hdr->signal = 0;
	hdr->cmd = BAM_DMUX_CMD_DATA;
	hdr->pad = pad;
	hdr->ch = bndev->ch;
	hdr->len = skb->len - sizeof(*hdr);
	if (pad)
		skb_put_zero(skb, pad);

	return 0;
}

static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
	struct bam_dmux *dmux = bndev->dmux;
	struct bam_dmux_skb_dma *skb_dma;
	int active, ret;

	skb_dma = bam_dmux_tx_queue(dmux, skb);
	if (!skb_dma)
		return NETDEV_TX_BUSY;

	active = pm_runtime_get(dmux->dev);
	if (active < 0 && active != -EINPROGRESS)
		goto drop;

	ret = bam_dmux_tx_prepare_skb(bndev, skb);
	if (ret)
		goto drop;

	if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE))
		goto drop;

	if (active <= 0) {
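		/* Defer the skb; the wakeup worker submits it once resumed */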
		if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
					  &dmux->tx_deferred_skb))
			queue_pm_work(&dmux->tx_wakeup_work);
		return NETDEV_TX_OK;
	}

	if (!bam_dmux_skb_dma_submit_tx(skb_dma))
		goto drop;

	dma_async_issue_pending(dmux->tx);
	return NETDEV_TX_OK;

drop:
	bam_dmux_tx_done(skb_dma);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void bam_dmux_tx_wakeup_work(struct work_struct *work)
{
	struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
	unsigned long pending;
	int ret, i;

	ret = pm_runtime_resume_and_get(dmux->dev);
	if (ret < 0) {
		dev_err(dmux->dev, "Failed to resume: %d\n", ret);
		return;
	}

	pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
	if (!pending)
		goto out;

	dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
	for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB) {
		bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
	}
	dma_async_issue_pending(dmux->tx);

out:
	pm_runtime_mark_last_busy(dmux->dev);
	pm_runtime_put_autosuspend(dmux->dev);
}

static const struct net_device_ops bam_dmux_ops = {
	.ndo_open = bam_dmux_netdev_open,
	.ndo_stop = bam_dmux_netdev_stop,
	.ndo_start_xmit = bam_dmux_netdev_start_xmit,
};

static const struct device_type wwan_type = {
	.name = "wwan",
};

static void bam_dmux_netdev_setup(struct net_device *dev)
{
	dev->netdev_ops = &bam_dmux_ops;

	dev->type = ARPHRD_RAWIP;
	SET_NETDEV_DEVTYPE(dev, &wwan_type);
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
	dev->needed_headroom = sizeof(struct bam_dmux_hdr);
	dev->needed_tailroom = sizeof(u32);
	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;

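	/* Use a random perm_addr as the MAC until a real address is set */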
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

static void bam_dmux_register_netdev_work(struct work_struct *work)
{
	struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
	struct bam_dmux_netdev *bndev;
	struct net_device *netdev;
	int ch, ret;

	for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
		if (dmux->netdevs[ch])
			continue;

		netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM,
				      bam_dmux_netdev_setup);
		if (!netdev)
			return;

		SET_NETDEV_DEV(netdev, dmux->dev);
		netdev->dev_port = ch;

		bndev = netdev_priv(netdev);
		bndev->dmux = dmux;
		bndev->ch = ch;

		ret = register_netdev(netdev);
		if (ret) {
			dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",
				ch, ret);
			free_netdev(netdev);
			return;
		}

		dmux->netdevs[ch] = netdev;
	}
}

static void bam_dmux_rx_callback(void *data);

static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
					   skb_dma->skb->len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");
		return false;
	}

	desc->callback = bam_dmux_rx_callback;
	desc->callback_param = skb_dma;
	desc->cookie = dmaengine_submit(desc);
	return true;
}

static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp)
{
	if (!skb_dma->skb) {
		skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
		if (!skb_dma->skb)
			return false;
		skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
	}

	return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) &&
	       bam_dmux_skb_dma_submit_rx(skb_dma);
}

static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct sk_buff *skb = skb_dma->skb;
	struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	if (!netdev || !netif_running(netdev)) {
		dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);
		return;
	}

	if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
		dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
			hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);
		return;
	}

	skb_dma->skb = NULL;

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, hdr->len);
	skb->dev = netdev;

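	/* Pick the protocol from the IP version nibble; anything else is passed up as ETH_P_MAP */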
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		skb->protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}

	netif_receive_skb(skb);
}

static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);

	if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
		dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);
		return;
	}

	if (netdev) {
		netif_device_attach(netdev);
	} else {
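		/* Cannot register a netdev from atomic context, defer to a workqueue */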
		schedule_work(&dmux->register_netdev_work);
	}
}

static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);

	if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
		dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);
		return;
	}

	if (netdev)
		netif_device_detach(netdev);
}

static void bam_dmux_rx_callback(void *data)
{
	struct bam_dmux_skb_dma *skb_dma = data;
	struct bam_dmux *dmux = skb_dma->dmux;
	struct sk_buff *skb = skb_dma->skb;
	struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;

	bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE);

	if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
		dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);
		goto out;
	}

	if (hdr->ch >= BAM_DMUX_NUM_CH) {
		dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);
		goto out;
	}

	switch (hdr->cmd) {
	case BAM_DMUX_CMD_DATA:
		bam_dmux_cmd_data(skb_dma);
		break;
	case BAM_DMUX_CMD_OPEN:
		bam_dmux_cmd_open(dmux, hdr);
		break;
	case BAM_DMUX_CMD_CLOSE:
		bam_dmux_cmd_close(dmux, hdr);
		break;
	default:
		dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
			hdr->cmd, hdr->ch);
		break;
	}

out:
	if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC))
		dma_async_issue_pending(dmux->rx);
}

static bool bam_dmux_power_on(struct bam_dmux *dmux)
{
	struct device *dev = dmux->dev;
	struct dma_slave_config dma_rx_conf = {
		.direction = DMA_DEV_TO_MEM,
		.src_maxburst = BAM_DMUX_BUFFER_SIZE,
	};
	int i;

	dmux->rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dmux->rx)) {
		dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
		dmux->rx = NULL;
		return false;
	}
	dmaengine_slave_config(dmux->rx, &dma_rx_conf);

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
			return false;
	}
	dma_async_issue_pending(dmux->rx);

	return true;
}

static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[],
			       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		struct bam_dmux_skb_dma *skb_dma = &skbs[i];

		if (skb_dma->addr)
			bam_dmux_skb_dma_unmap(skb_dma, dir);
		if (skb_dma->skb) {
			dev_kfree_skb(skb_dma->skb);
			skb_dma->skb = NULL;
		}
	}
}

static void bam_dmux_power_off(struct bam_dmux *dmux)
{
	if (dmux->tx) {
		dmaengine_terminate_sync(dmux->tx);
		dma_release_channel(dmux->tx);
		dmux->tx = NULL;
	}

	if (dmux->rx) {
		dmaengine_terminate_sync(dmux->rx);
		dma_release_channel(dmux->rx);
		dmux->rx = NULL;
	}

	bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
}

static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
{
	struct bam_dmux *dmux = data;
	bool new_state = !dmux->pc_state;

	dev_dbg(dmux->dev, "pc: %u\n", new_state);

	if (new_state) {
		if (bam_dmux_power_on(dmux))
			bam_dmux_pc_ack(dmux);
		else
			bam_dmux_power_off(dmux);
	} else {
		bam_dmux_power_off(dmux);
		bam_dmux_pc_ack(dmux);
	}

	dmux->pc_state = new_state;
	wake_up_all(&dmux->pc_wait);

	return IRQ_HANDLED;
}

static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
{
	struct bam_dmux *dmux = data;

	dev_dbg(dmux->dev, "pc ack\n");
	complete_all(&dmux->pc_ack_completion);

	return IRQ_HANDLED;
}

static int bam_dmux_runtime_suspend(struct device *dev)
{
	struct bam_dmux *dmux = dev_get_drvdata(dev);

	dev_dbg(dev, "runtime suspend\n");
	bam_dmux_pc_vote(dmux, false);

	return 0;
}

static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
{
	struct bam_dmux *dmux = dev_get_drvdata(dev);

	dev_dbg(dev, "runtime resume\n");

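	/* Wait until a previous power-down vote was acked by the remote side */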
	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
					 BAM_DMUX_REMOTE_TIMEOUT))
		return -ETIMEDOUT;

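	/* Vote for power-on */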
	bam_dmux_pc_vote(dmux, true);

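	/* Wait for the remote side to acknowledge the vote */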
	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
					 BAM_DMUX_REMOTE_TIMEOUT)) {
		bam_dmux_pc_vote(dmux, false);
		return -ETIMEDOUT;
	}

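	/* Wait until the remote side signals that it is powered up */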
	if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
				BAM_DMUX_REMOTE_TIMEOUT)) {
		bam_dmux_pc_vote(dmux, false);
		return -ETIMEDOUT;
	}

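	/* Ensure that the RX path was actually set up successfully */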
	if (!dmux->rx) {
		bam_dmux_pc_vote(dmux, false);
		return -ENXIO;
	}

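	/* Request the TX channel if we do not have it yet */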
	if (dmux->tx)
		return 0;

	dmux->tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dmux->tx)) {
		dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
		dmux->tx = NULL;
		bam_dmux_runtime_suspend(dev);
		return -ENXIO;
	}

	return 0;
}

static int bam_dmux_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bam_dmux *dmux;
	int ret, pc_ack_irq, i;
	unsigned int bit;

	dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
	if (!dmux)
		return -ENOMEM;

	dmux->dev = dev;
	platform_set_drvdata(pdev, dmux);

	dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
	if (dmux->pc_irq < 0)
		return dmux->pc_irq;

	pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
	if (pc_ack_irq < 0)
		return pc_ack_irq;

	dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
	if (IS_ERR(dmux->pc))
		return dev_err_probe(dev, PTR_ERR(dmux->pc),
				     "Failed to get pc state\n");
	dmux->pc_mask = BIT(bit);

	dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
	if (IS_ERR(dmux->pc_ack))
		return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
				     "Failed to get pc-ack state\n");
	dmux->pc_ack_mask = BIT(bit);

	init_waitqueue_head(&dmux->pc_wait);
	init_completion(&dmux->pc_ack_completion);
	complete_all(&dmux->pc_ack_completion);

	spin_lock_init(&dmux->tx_lock);
	INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
	INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		dmux->rx_skbs[i].dmux = dmux;
		dmux->tx_skbs[i].dmux = dmux;
	}

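	/*
	 * Runtime PM manages our own power vote. Note that the RX path can be
	 * active even while we are runtime suspended, since it is controlled
	 * by the remote side.
	 */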
	pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
					IRQF_ONESHOT, NULL, dmux);
	if (ret)
		return ret;

	ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
					IRQF_ONESHOT, NULL, dmux);
	if (ret)
		return ret;

	ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
				    &dmux->pc_state);
	if (ret)
		return ret;

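	/* Check if the remote side finished initialization before us */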
	if (dmux->pc_state) {
		if (bam_dmux_power_on(dmux))
			bam_dmux_pc_ack(dmux);
		else
			bam_dmux_power_off(dmux);
	}

	return 0;
}

static int bam_dmux_remove(struct platform_device *pdev)
{
	struct bam_dmux *dmux = platform_get_drvdata(pdev);
	struct device *dev = dmux->dev;
	LIST_HEAD(list);
	int i;

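	/* Unregister network interfaces before tearing down the rest */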
	cancel_work_sync(&dmux->register_netdev_work);
	rtnl_lock();
	for (i = 0; i < BAM_DMUX_NUM_CH; ++i)
		if (dmux->netdevs[i])
			unregister_netdevice_queue(dmux->netdevs[i], &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
	cancel_work_sync(&dmux->tx_wakeup_work);

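	/* Drop our own power vote */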
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	bam_dmux_runtime_suspend(dev);
	pm_runtime_set_suspended(dev);

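	/* Wait for the remote side to drop its power vote and power down */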
	if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))
		dev_err(dev, "Timed out waiting for remote side to suspend\n");

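	/* Make sure everything is cleaned up before we return */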
	disable_irq(dmux->pc_irq);
	bam_dmux_power_off(dmux);
	bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);

	return 0;
}

static const struct dev_pm_ops bam_dmux_pm_ops = {
	SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL)
};

static const struct of_device_id bam_dmux_of_match[] = {
	{ .compatible = "qcom,bam-dmux" },
	{ }
};
MODULE_DEVICE_TABLE(of, bam_dmux_of_match);

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.remove = bam_dmux_remove,
	.driver = {
		.name = "bam-dmux",
		.pm = &bam_dmux_pm_ops,
		.of_match_table = bam_dmux_of_match,
	},
};
module_platform_driver(bam_dmux_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");
MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");