0001
0002
0003
0004
0005
0006
0007 #include <linux/dma-mapping.h>
0008 #include <linux/errno.h>
0009 #include <linux/kernel.h>
0010 #include <linux/module.h>
0011 #include <sound/core.h>
0012 #include <sound/pcm.h>
0013 #include <sound/soc.h>
0014
0015 #include "aio.h"
0016
0017 static struct snd_pcm_hardware uniphier_aiodma_hw = {
0018 .info = SNDRV_PCM_INFO_MMAP |
0019 SNDRV_PCM_INFO_MMAP_VALID |
0020 SNDRV_PCM_INFO_INTERLEAVED,
0021 .period_bytes_min = 256,
0022 .period_bytes_max = 4096,
0023 .periods_min = 4,
0024 .periods_max = 1024,
0025 .buffer_bytes_max = 128 * 1024,
0026 };
0027
0028 static void aiodma_pcm_irq(struct uniphier_aio_sub *sub)
0029 {
0030 struct snd_pcm_runtime *runtime = sub->substream->runtime;
0031 int bytes = runtime->period_size *
0032 runtime->channels * samples_to_bytes(runtime, 1);
0033 int ret;
0034
0035 spin_lock(&sub->lock);
0036 ret = aiodma_rb_set_threshold(sub, runtime->dma_bytes,
0037 sub->threshold + bytes);
0038 if (!ret)
0039 sub->threshold += bytes;
0040
0041 aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
0042 aiodma_rb_clear_irq(sub);
0043 spin_unlock(&sub->lock);
0044
0045 snd_pcm_period_elapsed(sub->substream);
0046 }
0047
0048 static void aiodma_compr_irq(struct uniphier_aio_sub *sub)
0049 {
0050 struct snd_compr_runtime *runtime = sub->cstream->runtime;
0051 int bytes = runtime->fragment_size;
0052 int ret;
0053
0054 spin_lock(&sub->lock);
0055 ret = aiodma_rb_set_threshold(sub, sub->compr_bytes,
0056 sub->threshold + bytes);
0057 if (!ret)
0058 sub->threshold += bytes;
0059
0060 aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
0061 aiodma_rb_clear_irq(sub);
0062 spin_unlock(&sub->lock);
0063
0064 snd_compr_fragment_elapsed(sub->cstream);
0065 }
0066
0067 static irqreturn_t aiodma_irq(int irq, void *p)
0068 {
0069 struct platform_device *pdev = p;
0070 struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
0071 irqreturn_t ret = IRQ_NONE;
0072 int i, j;
0073
0074 for (i = 0; i < chip->num_aios; i++) {
0075 struct uniphier_aio *aio = &chip->aios[i];
0076
0077 for (j = 0; j < ARRAY_SIZE(aio->sub); j++) {
0078 struct uniphier_aio_sub *sub = &aio->sub[j];
0079
0080
0081 if (!sub->running || !aiodma_rb_is_irq(sub))
0082 continue;
0083
0084 if (sub->substream)
0085 aiodma_pcm_irq(sub);
0086 if (sub->cstream)
0087 aiodma_compr_irq(sub);
0088
0089 ret = IRQ_HANDLED;
0090 }
0091 }
0092
0093 return ret;
0094 }
0095
0096 static int uniphier_aiodma_open(struct snd_soc_component *component,
0097 struct snd_pcm_substream *substream)
0098 {
0099 struct snd_pcm_runtime *runtime = substream->runtime;
0100
0101 snd_soc_set_runtime_hwparams(substream, &uniphier_aiodma_hw);
0102
0103 return snd_pcm_hw_constraint_step(runtime, 0,
0104 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256);
0105 }
0106
0107 static int uniphier_aiodma_prepare(struct snd_soc_component *component,
0108 struct snd_pcm_substream *substream)
0109 {
0110 struct snd_pcm_runtime *runtime = substream->runtime;
0111 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
0112 struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
0113 struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
0114 int bytes = runtime->period_size *
0115 runtime->channels * samples_to_bytes(runtime, 1);
0116 unsigned long flags;
0117 int ret;
0118
0119 ret = aiodma_ch_set_param(sub);
0120 if (ret)
0121 return ret;
0122
0123 spin_lock_irqsave(&sub->lock, flags);
0124 ret = aiodma_rb_set_buffer(sub, runtime->dma_addr,
0125 runtime->dma_addr + runtime->dma_bytes,
0126 bytes);
0127 spin_unlock_irqrestore(&sub->lock, flags);
0128 if (ret)
0129 return ret;
0130
0131 return 0;
0132 }
0133
0134 static int uniphier_aiodma_trigger(struct snd_soc_component *component,
0135 struct snd_pcm_substream *substream, int cmd)
0136 {
0137 struct snd_pcm_runtime *runtime = substream->runtime;
0138 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
0139 struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
0140 struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
0141 struct device *dev = &aio->chip->pdev->dev;
0142 int bytes = runtime->period_size *
0143 runtime->channels * samples_to_bytes(runtime, 1);
0144 unsigned long flags;
0145
0146 spin_lock_irqsave(&sub->lock, flags);
0147 switch (cmd) {
0148 case SNDRV_PCM_TRIGGER_START:
0149 aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes,
0150 bytes);
0151 aiodma_ch_set_enable(sub, 1);
0152 sub->running = 1;
0153
0154 break;
0155 case SNDRV_PCM_TRIGGER_STOP:
0156 sub->running = 0;
0157 aiodma_ch_set_enable(sub, 0);
0158
0159 break;
0160 default:
0161 dev_warn(dev, "Unknown trigger(%d) ignored\n", cmd);
0162 break;
0163 }
0164 spin_unlock_irqrestore(&sub->lock, flags);
0165
0166 return 0;
0167 }
0168
0169 static snd_pcm_uframes_t uniphier_aiodma_pointer(
0170 struct snd_soc_component *component,
0171 struct snd_pcm_substream *substream)
0172 {
0173 struct snd_pcm_runtime *runtime = substream->runtime;
0174 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
0175 struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
0176 struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
0177 int bytes = runtime->period_size *
0178 runtime->channels * samples_to_bytes(runtime, 1);
0179 unsigned long flags;
0180 snd_pcm_uframes_t pos;
0181
0182 spin_lock_irqsave(&sub->lock, flags);
0183 aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
0184
0185 if (sub->swm->dir == PORT_DIR_OUTPUT)
0186 pos = bytes_to_frames(runtime, sub->rd_offs);
0187 else
0188 pos = bytes_to_frames(runtime, sub->wr_offs);
0189 spin_unlock_irqrestore(&sub->lock, flags);
0190
0191 return pos;
0192 }
0193
0194 static int uniphier_aiodma_mmap(struct snd_soc_component *component,
0195 struct snd_pcm_substream *substream,
0196 struct vm_area_struct *vma)
0197 {
0198 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
0199
0200 return remap_pfn_range(vma, vma->vm_start,
0201 substream->runtime->dma_addr >> PAGE_SHIFT,
0202 vma->vm_end - vma->vm_start, vma->vm_page_prot);
0203 }
0204
0205 static int uniphier_aiodma_new(struct snd_soc_component *component,
0206 struct snd_soc_pcm_runtime *rtd)
0207 {
0208 struct device *dev = rtd->card->snd_card->dev;
0209 struct snd_pcm *pcm = rtd->pcm;
0210 int ret;
0211
0212 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(33));
0213 if (ret)
0214 return ret;
0215
0216 snd_pcm_set_managed_buffer_all(pcm,
0217 SNDRV_DMA_TYPE_DEV, dev,
0218 uniphier_aiodma_hw.buffer_bytes_max,
0219 uniphier_aiodma_hw.buffer_bytes_max);
0220 return 0;
0221 }
0222
/* ASoC platform component: PCM ops plus compress-stream support. */
static const struct snd_soc_component_driver uniphier_soc_platform = {
	.open		= uniphier_aiodma_open,
	.prepare	= uniphier_aiodma_prepare,
	.trigger	= uniphier_aiodma_trigger,
	.pointer	= uniphier_aiodma_pointer,
	.mmap		= uniphier_aiodma_mmap,
	.pcm_construct	= uniphier_aiodma_new,
	.compress_ops	= &uniphier_aio_compress_ops,
};
0232
/*
 * MMIO regmap: 32-bit registers at 4-byte stride, uncached
 * (REGCACHE_NONE) since the DMA ring pointers change under hardware
 * control.  max_register 0x7fffc covers the full register window.
 */
static const struct regmap_config aiodma_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0x7fffc,
	.cache_type = REGCACHE_NONE,
};
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251 int uniphier_aiodma_soc_register_platform(struct platform_device *pdev)
0252 {
0253 struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
0254 struct device *dev = &pdev->dev;
0255 void __iomem *preg;
0256 int irq, ret;
0257
0258 preg = devm_platform_ioremap_resource(pdev, 0);
0259 if (IS_ERR(preg))
0260 return PTR_ERR(preg);
0261
0262 chip->regmap = devm_regmap_init_mmio(dev, preg,
0263 &aiodma_regmap_config);
0264 if (IS_ERR(chip->regmap))
0265 return PTR_ERR(chip->regmap);
0266
0267 irq = platform_get_irq(pdev, 0);
0268 if (irq < 0)
0269 return irq;
0270
0271 ret = devm_request_irq(dev, irq, aiodma_irq,
0272 IRQF_SHARED, dev_name(dev), pdev);
0273 if (ret)
0274 return ret;
0275
0276 return devm_snd_soc_register_component(dev, &uniphier_soc_platform,
0277 NULL, 0);
0278 }
0279 EXPORT_SYMBOL_GPL(uniphier_aiodma_soc_register_platform);