Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
0003 
0004 #include <linux/types.h>
0005 #include <linux/io.h>
0006 
0007 #include "shm_ipc.h"
0008 
0009 #undef pr_fmt
0010 #define pr_fmt(fmt) "qtnfmac shm_ipc: %s: " fmt, __func__
0011 
0012 static bool qtnf_shm_ipc_has_new_data(struct qtnf_shm_ipc *ipc)
0013 {
0014     const u32 flags = readl(&ipc->shm_region->headroom.hdr.flags);
0015 
0016     return (flags & QTNF_SHM_IPC_NEW_DATA);
0017 }
0018 
/* Consume one inbound packet from the shared memory region.
 *
 * Reads the payload length from the shared header, hands the payload to
 * the registered rx callback if the length is sane, then ACKs the remote
 * side. The ACK must only be written after the callback has consumed the
 * shared data buffer, since the remote may reuse it once ACKed.
 */
static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
{
    size_t size;
    bool rx_buff_ok = true;
    struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;

    shm_reg_hdr = &ipc->shm_region->headroom.hdr;

    /* payload length as written by the remote side */
    size = readw(&shm_reg_hdr->data_len);

    /* reject empty or oversized packets, but still ACK below so the
     * remote side does not stall waiting for a response
     */
    if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
        pr_err("wrong rx packet size: %zu\n", size);
        rx_buff_ok = false;
    }

    if (likely(rx_buff_ok)) {
        ipc->rx_packet_count++;
        ipc->rx_callback.fn(ipc->rx_callback.arg,
                    ipc->shm_region->data, size);
    }

    writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
    readl(&shm_reg_hdr->flags); /* flush PCIe write */

    /* ring the doorbell so the remote side notices the ACK */
    ipc->interrupt.fn(ipc->interrupt.arg);
}
0045 
0046 static void qtnf_shm_ipc_irq_work(struct work_struct *work)
0047 {
0048     struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc,
0049                         irq_work);
0050 
0051     while (qtnf_shm_ipc_has_new_data(ipc))
0052         qtnf_shm_handle_new_data(ipc);
0053 }
0054 
0055 static void qtnf_shm_ipc_irq_inbound_handler(struct qtnf_shm_ipc *ipc)
0056 {
0057     u32 flags;
0058 
0059     flags = readl(&ipc->shm_region->headroom.hdr.flags);
0060 
0061     if (flags & QTNF_SHM_IPC_NEW_DATA)
0062         queue_work(ipc->workqueue, &ipc->irq_work);
0063 }
0064 
0065 static void qtnf_shm_ipc_irq_outbound_handler(struct qtnf_shm_ipc *ipc)
0066 {
0067     u32 flags;
0068 
0069     if (!READ_ONCE(ipc->waiting_for_ack))
0070         return;
0071 
0072     flags = readl(&ipc->shm_region->headroom.hdr.flags);
0073 
0074     if (flags & QTNF_SHM_IPC_ACK) {
0075         WRITE_ONCE(ipc->waiting_for_ack, 0);
0076         complete(&ipc->tx_completion);
0077     }
0078 }
0079 
0080 int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc,
0081               enum qtnf_shm_ipc_direction direction,
0082               struct qtnf_shm_ipc_region __iomem *shm_region,
0083               struct workqueue_struct *workqueue,
0084               const struct qtnf_shm_ipc_int *interrupt,
0085               const struct qtnf_shm_ipc_rx_callback *rx_callback)
0086 {
0087     BUILD_BUG_ON(offsetof(struct qtnf_shm_ipc_region, data) !=
0088              QTN_IPC_REG_HDR_SZ);
0089     BUILD_BUG_ON(sizeof(struct qtnf_shm_ipc_region) > QTN_IPC_REG_SZ);
0090 
0091     ipc->shm_region = shm_region;
0092     ipc->direction = direction;
0093     ipc->interrupt = *interrupt;
0094     ipc->rx_callback = *rx_callback;
0095     ipc->tx_packet_count = 0;
0096     ipc->rx_packet_count = 0;
0097     ipc->workqueue = workqueue;
0098     ipc->waiting_for_ack = 0;
0099     ipc->tx_timeout_count = 0;
0100 
0101     switch (direction) {
0102     case QTNF_SHM_IPC_OUTBOUND:
0103         ipc->irq_handler = qtnf_shm_ipc_irq_outbound_handler;
0104         break;
0105     case QTNF_SHM_IPC_INBOUND:
0106         ipc->irq_handler = qtnf_shm_ipc_irq_inbound_handler;
0107         break;
0108     default:
0109         return -EINVAL;
0110     }
0111 
0112     INIT_WORK(&ipc->irq_work, qtnf_shm_ipc_irq_work);
0113     init_completion(&ipc->tx_completion);
0114 
0115     return 0;
0116 }
0117 
0118 void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc)
0119 {
0120     complete_all(&ipc->tx_completion);
0121 }
0122 
/* Transmit one packet over the shared memory channel and wait for the
 * remote side to ACK it.
 *
 * @ipc: outbound channel
 * @buf: payload to send
 * @size: payload length in bytes
 *
 * Returns 0 on success, -E2BIG if the payload does not fit the shared
 * buffer, -ETIMEDOUT if no ACK arrived within QTN_SHM_IPC_ACK_TIMEOUT.
 *
 * The barrier/write sequence below is order-sensitive: length and payload
 * must be visible before waiting_for_ack is set, and everything must be
 * visible before the NEW_DATA flag announces the packet to the remote.
 */
int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size)
{
    int ret = 0;
    struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;

    shm_reg_hdr = &ipc->shm_region->headroom.hdr;

    if (unlikely(size > QTN_IPC_MAX_DATA_SZ))
        return -E2BIG;

    ipc->tx_packet_count++;

    /* stage length and payload into the shared region */
    writew(size, &shm_reg_hdr->data_len);
    memcpy_toio(ipc->shm_region->data, buf, size);

    /* sync previous writes before proceeding */
    dma_wmb();

    /* arm the ACK path before the remote can possibly respond, so the
     * outbound IRQ handler does not ignore the ACK
     */
    WRITE_ONCE(ipc->waiting_for_ack, 1);

    /* sync previous memory write before announcing new data ready */
    wmb();

    writel(QTNF_SHM_IPC_NEW_DATA, &shm_reg_hdr->flags);
    readl(&shm_reg_hdr->flags); /* flush PCIe write */

    /* ring the doorbell so the remote side notices NEW_DATA */
    ipc->interrupt.fn(ipc->interrupt.arg);

    if (!wait_for_completion_timeout(&ipc->tx_completion,
                     QTN_SHM_IPC_ACK_TIMEOUT)) {
        ret = -ETIMEDOUT;
        ipc->tx_timeout_count++;
        pr_err("TX ACK timeout\n");
    }

    /* now we're not waiting for ACK even in case of timeout */
    WRITE_ONCE(ipc->waiting_for_ack, 0);

    return ret;
}