Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
0004  */
0005 #include <linux/dma-mapping.h>
0006 #include <linux/mei.h>
0007 
0008 #include "mei_dev.h"
0009 
0010 /**
0011  * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
0012  *     for the dma descriptor
0013  * @dev: mei_device
0014  * @dscr: dma descriptor
0015  *
0016  * Return:
0017  * * 0       - on success or zero allocation request
0018  * * -EINVAL - if size is not power of 2
0019  * * -ENOMEM - of allocation has failed
0020  */
0021 static int mei_dmam_dscr_alloc(struct mei_device *dev,
0022                    struct mei_dma_dscr *dscr)
0023 {
0024     if (!dscr->size)
0025         return 0;
0026 
0027     if (WARN_ON(!is_power_of_2(dscr->size)))
0028         return -EINVAL;
0029 
0030     if (dscr->vaddr)
0031         return 0;
0032 
0033     dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
0034                       GFP_KERNEL);
0035     if (!dscr->vaddr)
0036         return -ENOMEM;
0037 
0038     return 0;
0039 }
0040 
0041 /**
0042  * mei_dmam_dscr_free() - free a managed coherent buffer
0043  *     from the dma descriptor
0044  * @dev: mei_device
0045  * @dscr: dma descriptor
0046  */
0047 static void mei_dmam_dscr_free(struct mei_device *dev,
0048                    struct mei_dma_dscr *dscr)
0049 {
0050     if (!dscr->vaddr)
0051         return;
0052 
0053     dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
0054     dscr->vaddr = NULL;
0055 }
0056 
0057 /**
0058  * mei_dmam_ring_free() - free dma ring buffers
0059  * @dev: mei device
0060  */
0061 void mei_dmam_ring_free(struct mei_device *dev)
0062 {
0063     int i;
0064 
0065     for (i = 0; i < DMA_DSCR_NUM; i++)
0066         mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
0067 }
0068 
0069 /**
0070  * mei_dmam_ring_alloc() - allocate dma ring buffers
0071  * @dev: mei device
0072  *
0073  * Return: -ENOMEM on allocation failure 0 otherwise
0074  */
0075 int mei_dmam_ring_alloc(struct mei_device *dev)
0076 {
0077     int i;
0078 
0079     for (i = 0; i < DMA_DSCR_NUM; i++)
0080         if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
0081             goto err;
0082 
0083     return 0;
0084 
0085 err:
0086     mei_dmam_ring_free(dev);
0087     return -ENOMEM;
0088 }
0089 
0090 /**
0091  * mei_dma_ring_is_allocated() - check if dma ring is allocated
0092  * @dev: mei device
0093  *
0094  * Return: true if dma ring is allocated
0095  */
0096 bool mei_dma_ring_is_allocated(struct mei_device *dev)
0097 {
0098     return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
0099 }
0100 
/*
 * mei_dma_ring_ctrl() - fetch the ring control block.
 * Returns the CTRL descriptor's coherent buffer viewed as
 * struct hbm_dma_ring_ctrl, or NULL when the ring is not allocated.
 */
static inline
struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
{
	return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
}
0106 
0107 /**
0108  * mei_dma_ring_reset() - reset the dma control block
0109  * @dev: mei device
0110  */
0111 void mei_dma_ring_reset(struct mei_device *dev)
0112 {
0113     struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
0114 
0115     if (!ctrl)
0116         return;
0117 
0118     memset(ctrl, 0, sizeof(*ctrl));
0119 }
0120 
0121 /**
0122  * mei_dma_copy_from() - copy from dma ring into buffer
0123  * @dev: mei device
0124  * @buf: data buffer
0125  * @offset: offset in slots.
0126  * @n: number of slots to copy.
0127  */
0128 static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
0129                 u32 offset, u32 n)
0130 {
0131     unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;
0132 
0133     size_t b_offset = offset << 2;
0134     size_t b_n = n << 2;
0135 
0136     memcpy(buf, dbuf + b_offset, b_n);
0137 
0138     return b_n;
0139 }
0140 
0141 /**
0142  * mei_dma_copy_to() - copy to a buffer to the dma ring
0143  * @dev: mei device
0144  * @buf: data buffer
0145  * @offset: offset in slots.
0146  * @n: number of slots to copy.
0147  */
0148 static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
0149                   u32 offset, u32 n)
0150 {
0151     unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;
0152 
0153     size_t b_offset = offset << 2;
0154     size_t b_n = n << 2;
0155 
0156     memcpy(hbuf + b_offset, buf, b_n);
0157 
0158     return b_n;
0159 }
0160 
/**
 * mei_dma_ring_read() - read data from the ring
 * @dev: mei device
 * @buf: buffer to read into: may be NULL in case of dropping the data.
 * @len: length to read in bytes.
 */
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 dbuf_depth;
	u32 rd_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "reading from dma %u bytes\n", len);

	if (!len)
		return;

	/*
	 * Depth in 4-byte slots; the descriptor size is a power of 2
	 * (enforced at allocation), so (depth - 1) is a valid index mask.
	 */
	dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
	rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
	slots = mei_data2slots(len);

	/* if buf is NULL we drop the packet by advancing the pointer.*/
	if (!buf)
		goto out;

	/* copy in two chunks when the read wraps past the end of the ring */
	if (rd_idx + slots > dbuf_depth) {
		buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
		rem = slots - (dbuf_depth - rd_idx);
		rd_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_from(dev, buf, rd_idx, rem);
out:
	/* the stored index grows monotonically; it is masked on every use */
	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
}
0201 
0202 static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
0203 {
0204     return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
0205 }
0206 
0207 /**
0208  * mei_dma_ring_empty_slots() - calaculate number of empty slots in dma ring
0209  * @dev: mei_device
0210  *
0211  * Return: number of empty slots
0212  */
0213 u32 mei_dma_ring_empty_slots(struct mei_device *dev)
0214 {
0215     struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
0216     u32 wr_idx, rd_idx, hbuf_depth, empty;
0217 
0218     if (!mei_dma_ring_is_allocated(dev))
0219         return 0;
0220 
0221     if (WARN_ON(!ctrl))
0222         return 0;
0223 
0224     /* easier to work in slots */
0225     hbuf_depth = mei_dma_ring_hbuf_depth(dev);
0226     rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
0227     wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);
0228 
0229     if (rd_idx > wr_idx)
0230         empty = rd_idx - wr_idx;
0231     else
0232         empty = hbuf_depth - (wr_idx - rd_idx);
0233 
0234     return empty;
0235 }
0236 
/**
 * mei_dma_ring_write - write data to dma ring host buffer
 *
 * @dev: mei_device
 * @buf: data will be written
 * @len: data length in bytes
 *
 * NOTE(review): no free-space check here — presumably the caller
 * consults mei_dma_ring_empty_slots() first; confirm at call sites.
 */
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 hbuf_depth;
	u32 wr_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
	/*
	 * Depth in 4-byte slots; the descriptor size is a power of 2
	 * (enforced at allocation), so (depth - 1) is a valid index mask.
	 */
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
	slots = mei_data2slots(len);

	/* copy in two chunks when the write wraps past the end of the ring */
	if (wr_idx + slots > hbuf_depth) {
		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
		rem = slots - (hbuf_depth - wr_idx);
		wr_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_to(dev, buf, wr_idx, rem);

	/* the stored index grows monotonically; it is masked on every use */
	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
}