// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Author: Cezary Rojewski <cezary.rojewski@intel.com>
//

#include <linux/pci.h>
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
#include "cldma.h"
#include "registers.h"

/* Stream Registers */
#define AZX_CL_SD_BASE              0x80
#define AZX_SD_CTL_STRM_MASK        GENMASK(23, 20)
#define AZX_SD_CTL_STRM(s)          (((s)->stream_tag << 20) & AZX_SD_CTL_STRM_MASK)
#define AZX_SD_BDLPL_BDLPLBA_MASK   GENMASK(31, 7)
#define AZX_SD_BDLPL_BDLPLBA(lb)    ((lb) & AZX_SD_BDLPL_BDLPLBA_MASK)

/* Software Position Based FIFO Capability Registers */
#define AZX_CL_SPBFCS               0x20
#define AZX_REG_CL_SPBFCTL          (AZX_CL_SPBFCS + 0x4)
#define AZX_REG_CL_SD_SPIB          (AZX_CL_SPBFCS + 0x8)

#define AVS_CL_OP_INTERVAL_US       3
#define AVS_CL_OP_TIMEOUT_US        300
#define AVS_CL_IOC_TIMEOUT_MS       300
#define AVS_CL_STREAM_INDEX         0

struct hda_cldma {
    struct device *dev;
    struct hdac_bus *bus;
    void __iomem *dsp_ba;               /* DSP register base address */

    unsigned int buffer_size;
    unsigned int num_periods;
    unsigned int stream_tag;
    void __iomem *sd_addr;              /* CLDMA stream descriptor registers */

    struct snd_dma_buffer dmab_data;    /* cyclic data buffer */
    struct snd_dma_buffer dmab_bdl;     /* buffer descriptor list */
    struct delayed_work memcpy_work;
    struct completion completion;       /* signalled from the IRQ handler */

    /* runtime */
    void *position;                     /* next chunk of the image to copy */
    unsigned int remaining;             /* bytes of the image left to copy */
    unsigned int sd_status;             /* SD_STS latched on each interrupt */
};

static void cldma_memcpy_work(struct work_struct *work);

struct hda_cldma code_loader = {
    .stream_tag     = AVS_CL_STREAM_INDEX + 1,
    .memcpy_work    = __DELAYED_WORK_INITIALIZER(code_loader.memcpy_work, cldma_memcpy_work, 0),
    .completion     = COMPLETION_INITIALIZER(code_loader.completion),
};

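/*
 * Copy as much of the remaining image as fits into the DMA buffer, starting
 * at the current SPIB position and wrapping around at the buffer end, then
 * advance SPIB past the freshly copied data so the engine may consume it.
 */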
void hda_cldma_fill(struct hda_cldma *cl)
{
    unsigned int size, offset;

    if (cl->remaining > cl->buffer_size)
        size = cl->buffer_size;
    else
        size = cl->remaining;

    offset = snd_hdac_stream_readl(cl, CL_SD_SPIB);
    if (offset + size > cl->buffer_size) {
        unsigned int ss;

        ss = cl->buffer_size - offset;
        memcpy(cl->dmab_data.area + offset, cl->position, ss);
        offset = 0;
        size -= ss;
        cl->position += ss;
        cl->remaining -= ss;
    }

    memcpy(cl->dmab_data.area + offset, cl->position, size);
    cl->position += size;
    cl->remaining -= size;

    snd_hdac_stream_writel(cl, CL_SD_SPIB, offset + size);
}

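/*
 * Delayed-work handler driving the transfer: set the engine running, then on
 * every interrupt-on-completion refill the buffer and re-arm the CLDMA
 * interrupt until the whole image is out or an error/timeout breaks the loop.
 */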
static void cldma_memcpy_work(struct work_struct *work)
{
    struct hda_cldma *cl = container_of(work, struct hda_cldma, memcpy_work.work);
    int ret;

    ret = hda_cldma_start(cl);
    if (ret < 0) {
        dev_err(cl->dev, "cldma set RUN failed: %d\n", ret);
        return;
    }

    while (true) {
        ret = wait_for_completion_timeout(&cl->completion,
                                          msecs_to_jiffies(AVS_CL_IOC_TIMEOUT_MS));
        if (!ret) {
            dev_err(cl->dev, "cldma IOC timeout\n");
            break;
        }

        if (!(cl->sd_status & SD_INT_COMPLETE)) {
            dev_err(cl->dev, "cldma transfer error, SD status: 0x%08x\n",
                    cl->sd_status);
            break;
        }

        if (!cl->remaining)
            break;

        reinit_completion(&cl->completion);
        hda_cldma_fill(cl);
        /* enable CLDMA interrupt */
        snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA,
                              AVS_ADSP_ADSPIC_CLDMA);
    }
}

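/*
 * Begin transferring the image set up by hda_cldma_set_data(): prime the
 * buffer with the first chunk, then hand over to the memcpy work.
 */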
void hda_cldma_transfer(struct hda_cldma *cl, unsigned long start_delay)
{
    if (!cl->remaining)
        return;

    reinit_completion(&cl->completion);
    /* fill buffer with the first chunk before scheduling run */
    hda_cldma_fill(cl);

    schedule_delayed_work(&cl->memcpy_work, start_delay);
}

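/* Unmask CLDMA interrupts, set the RUN bit and wait for the engine to start. */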
int hda_cldma_start(struct hda_cldma *cl)
{
    unsigned int reg;

    /* enable interrupts */
    snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA,
                          AVS_ADSP_ADSPIC_CLDMA);
    snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START,
                            SD_INT_MASK | SD_CTL_DMA_START);

    /* await DMA engine start */
    return snd_hdac_stream_readb_poll(cl, SD_CTL, reg, reg & SD_CTL_DMA_START,
                                      AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
}

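/*
 * Mask CLDMA interrupts, clear the RUN bit, wait for the engine to halt and
 * cancel any memcpy work still in flight.
 */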
int hda_cldma_stop(struct hda_cldma *cl)
{
    unsigned int reg;
    int ret;

    /* disable interrupts */
    snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0);
    snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START, 0);

    /* await DMA engine stop */
    ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_DMA_START),
                                     AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
    cancel_delayed_work_sync(&cl->memcpy_work);

    return ret;
}

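/*
 * Stop the engine, then put the stream into and back out of reset, polling
 * SRST for each transition to take effect.
 */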
int hda_cldma_reset(struct hda_cldma *cl)
{
    unsigned int reg;
    int ret;

    ret = hda_cldma_stop(cl);
    if (ret < 0) {
        dev_err(cl->dev, "cldma stop failed: %d\n", ret);
        return ret;
    }

    snd_hdac_stream_updateb(cl, SD_CTL, SD_CTL_STREAM_RESET, SD_CTL_STREAM_RESET);
    ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, (reg & SD_CTL_STREAM_RESET),
                                     AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
    if (ret < 0) {
        dev_err(cl->dev, "cldma set SRST failed: %d\n", ret);
        return ret;
    }

    snd_hdac_stream_updateb(cl, SD_CTL, SD_CTL_STREAM_RESET, 0);
    ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_STREAM_RESET),
                                     AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
    if (ret < 0) {
        dev_err(cl->dev, "cldma unset SRST failed: %d\n", ret);
        return ret;
    }

    return 0;
}

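/* Point the runtime state at the image to be transferred next. */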
void hda_cldma_set_data(struct hda_cldma *cl, void *data, unsigned int size)
{
    /* setup runtime */
    cl->position = data;
    cl->remaining = size;
}

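/*
 * Build the buffer descriptor list covering the SG data buffer, one entry
 * per physically contiguous chunk of at most bdle_size bytes; only the last
 * entry requests an interrupt on completion.
 */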
static void cldma_setup_bdle(struct hda_cldma *cl, u32 bdle_size)
{
    struct snd_dma_buffer *dmab = &cl->dmab_data;
    __le32 *bdl = (__le32 *)cl->dmab_bdl.area;
    int remaining = cl->buffer_size;
    int offset = 0;

    cl->num_periods = 0;

    while (remaining > 0) {
        phys_addr_t addr;
        int chunk;

        addr = snd_sgbuf_get_addr(dmab, offset);
        bdl[0] = cpu_to_le32(lower_32_bits(addr));
        bdl[1] = cpu_to_le32(upper_32_bits(addr));
        chunk = snd_sgbuf_get_chunk_size(dmab, offset, bdle_size);
        bdl[2] = cpu_to_le32(chunk);

        remaining -= chunk;
        /* set IOC only for the last entry */
        bdl[3] = (remaining > 0) ? 0 : cpu_to_le32(0x01);

        bdl += 4;
        offset += chunk;
        cl->num_periods++;
    }
}

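/*
 * Program the stream descriptor: BDL base address, cyclic buffer length,
 * last valid index and stream tag, then enable the SPIB capability.
 */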
void hda_cldma_setup(struct hda_cldma *cl)
{
    dma_addr_t bdl_addr = cl->dmab_bdl.addr;

    cldma_setup_bdle(cl, cl->buffer_size / 2);

    snd_hdac_stream_writel(cl, SD_BDLPL, AZX_SD_BDLPL_BDLPLBA(lower_32_bits(bdl_addr)));
    snd_hdac_stream_writel(cl, SD_BDLPU, upper_32_bits(bdl_addr));

    snd_hdac_stream_writel(cl, SD_CBL, cl->buffer_size);
    snd_hdac_stream_writeb(cl, SD_LVI, cl->num_periods - 1);

    snd_hdac_stream_updatel(cl, SD_CTL, AZX_SD_CTL_STRM_MASK, AZX_SD_CTL_STRM(cl));
    /* enable spib */
    snd_hdac_stream_writel(cl, CL_SPBFCTL, 1);
}

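/*
 * Shared-vector IRQ handler: an ADSPIS read of all ones means the device is
 * gone; bail out unless the CLDMA bit is set, otherwise latch the stream
 * status, mask further CLDMA interrupts and wake up the waiter.
 */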
static irqreturn_t cldma_irq_handler(int irq, void *dev_id)
{
    struct hda_cldma *cl = dev_id;
    u32 adspis;

    adspis = snd_hdac_adsp_readl(cl, AVS_ADSP_REG_ADSPIS);
    if (adspis == UINT_MAX)
        return IRQ_NONE;
    if (!(adspis & AVS_ADSP_ADSPIS_CLDMA))
        return IRQ_NONE;

    cl->sd_status = snd_hdac_stream_readb(cl, SD_STS);
    dev_warn(cl->dev, "%s sd_status: 0x%08x\n", __func__, cl->sd_status);

    /* disable CLDMA interrupt */
    snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0);

    complete(&cl->completion);

    return IRQ_HANDLED;
}

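/*
 * Allocate the SG data buffer and the BDL, fill in the descriptor addresses
 * and request the interrupt handler on PCI vector 0; undo the allocations on
 * failure.
 */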
int hda_cldma_init(struct hda_cldma *cl, struct hdac_bus *bus, void __iomem *dsp_ba,
                   unsigned int buffer_size)
{
    struct pci_dev *pci = to_pci_dev(bus->dev);
    int ret;

    ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev, buffer_size, &cl->dmab_data);
    if (ret < 0)
        return ret;

    ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, bus->dev, BDL_SIZE, &cl->dmab_bdl);
    if (ret < 0)
        goto alloc_err;

    cl->dev = bus->dev;
    cl->bus = bus;
    cl->dsp_ba = dsp_ba;
    cl->buffer_size = buffer_size;
    cl->sd_addr = dsp_ba + AZX_CL_SD_BASE;

    ret = pci_request_irq(pci, 0, cldma_irq_handler, NULL, cl, "CLDMA");
    if (ret < 0) {
        dev_err(cl->dev, "Failed to request CLDMA IRQ handler: %d\n", ret);
        goto req_err;
    }

    return 0;

req_err:
    snd_dma_free_pages(&cl->dmab_bdl);
alloc_err:
    snd_dma_free_pages(&cl->dmab_data);

    return ret;
}

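/* Release the IRQ and free both DMA buffers acquired in hda_cldma_init(). */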
void hda_cldma_free(struct hda_cldma *cl)
{
    struct pci_dev *pci = to_pci_dev(cl->dev);

    pci_free_irq(pci, 0, cl);
    snd_dma_free_pages(&cl->dmab_data);
    snd_dma_free_pages(&cl->dmab_bdl);
}