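/*
 * ISHTP DMA interface: allocation of the host-side DMA-coherent Tx and Rx
 * buffers and management of the Tx slot map used for DMA message sends.
 */
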
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ishtp-dev.h"
#include "client.h"
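
/**
 * ishtp_cl_alloc_dma_buf() - Allocate host DMA Tx and Rx buffers
 * @dev: ishtp device
 *
 * Allocate 1 MB DMA-coherent Tx and Rx buffers once per device, along with
 * the Tx slot map used to track which DMA_SLOT_SIZE slots are in use.
 */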
void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf)
		return;

	dev->ishtp_host_dma_tx_buf_size = 1024 * 1024;
	dev->ishtp_host_dma_rx_buf_size = 1024 * 1024;

	/* Allocate Tx buffer and initialize the slot usage map */
	dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_tx_buf_size,
					&h, GFP_KERNEL);
	if (dev->ishtp_host_dma_tx_buf)
		dev->ishtp_host_dma_tx_buf_phys = h;

	dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
					DMA_SLOT_SIZE;

	dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
					sizeof(uint8_t),
					GFP_KERNEL);
	spin_lock_init(&dev->ishtp_dma_tx_lock);

	/* Allocate Rx buffer */
	dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_rx_buf_size,
					&h, GFP_KERNEL);

	if (dev->ishtp_host_dma_rx_buf)
		dev->ishtp_host_dma_rx_buf_phys = h;
}
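
/**
 * ishtp_cl_free_dma_buf() - Free host DMA Tx and Rx buffers
 * @dev: ishtp device
 *
 * Free the DMA-coherent Tx and Rx buffers and the Tx slot map allocated
 * by ishtp_cl_alloc_dma_buf().
 */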
void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf) {
		h = dev->ishtp_host_dma_tx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
				  dev->ishtp_host_dma_tx_buf, h);
	}

	if (dev->ishtp_host_dma_rx_buf) {
		h = dev->ishtp_host_dma_rx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
				  dev->ishtp_host_dma_rx_buf, h);
	}

	kfree(dev->ishtp_dma_tx_map);
	dev->ishtp_host_dma_tx_buf = NULL;
	dev->ishtp_host_dma_rx_buf = NULL;
	dev->ishtp_dma_tx_map = NULL;
}
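
/**
 * ishtp_cl_get_dma_send_buf() - Reserve a DMA send buffer
 * @dev: ishtp device
 * @size: requested message size in bytes
 *
 * Search the Tx slot map for a run of consecutive free slots large enough
 * to hold @size bytes and mark that run as busy.
 *
 * Return: pointer to the reserved region inside the Tx DMA buffer, or NULL
 * if no sufficiently large run of free slots is available.
 */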
void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
				uint32_t size)
{
	unsigned long flags;
	int i, j, free;

	/* Number of slots needed, rounding up to a whole slot */
	int required_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);

	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
	for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
		free = 1;
		for (j = 0; j < required_slots; j++)
			if (dev->ishtp_dma_tx_map[i+j]) {
				free = 0;
				i += j;
				break;
			}
		if (free) {
			/* Mark the whole run of slots as busy */
			for (j = 0; j < required_slots; j++)
				dev->ishtp_dma_tx_map[i+j] = 1;
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			return (i * DMA_SLOT_SIZE) +
				(unsigned char *)dev->ishtp_host_dma_tx_buf;
		}
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
	dev_err(dev->devc, "No free DMA buffer to send msg\n");
	return NULL;
}
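
/**
 * ishtp_cl_release_dma_acked_mem() - Release acked DMA send memory
 * @dev: ishtp device
 * @msg_addr: start address of the acked message inside the Tx DMA buffer
 * @size: size of the acked message in bytes
 *
 * Mark the slots occupied by an acknowledged message as free again so they
 * can be handed out by ishtp_cl_get_dma_send_buf().
 */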
void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
				    void *msg_addr,
				    uint8_t size)
{
	unsigned long flags;
	int acked_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);
	int i, j;

	if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
		dev_err(dev->devc, "Bad DMA Tx ack address\n");
		return;
	}

	i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
	for (j = 0; j < acked_slots; j++) {
		if ((i + j) >= dev->ishtp_dma_num_slots ||
				!dev->ishtp_dma_tx_map[i+j]) {
			/* No such slot, or the slot is already free */
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			dev_err(dev->devc, "Bad DMA Tx ack address\n");
			return;
		}
		dev->ishtp_dma_tx_map[i+j] = 0;
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
}
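
/*
 * A minimal usage sketch of the Tx slot helpers above, assuming the actual
 * DMA_XFER request and its acknowledgment are handled elsewhere in the
 * ISHTP client layer. This helper is purely illustrative and not part of
 * the driver; the payload copy is only hinted at in a comment.
 */
static inline void __maybe_unused ishtp_dma_send_sketch(struct ishtp_device *dev,
							 uint8_t len)
{
	void *dma_buf;

	/* Reserve enough consecutive Tx slots for a message of @len bytes */
	dma_buf = ishtp_cl_get_dma_send_buf(dev, len);
	if (!dma_buf)
		return;

	/*
	 * A real sender would copy the payload into dma_buf and post a
	 * DMA_XFER request describing it to the firmware. Once the transfer
	 * is acknowledged, the slots are returned to the pool:
	 */
	ishtp_cl_release_dma_acked_mem(dev, dma_buf, len);
}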