// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *
 * DMA Router driver for STM32 DMA MUX
 *
 * NOTE(review): original header lines were lost to extraction residue;
 * reconstructed from the MODULE_* metadata below — verify against upstream.
 */
0013 #include <linux/clk.h>
0014 #include <linux/delay.h>
0015 #include <linux/err.h>
0016 #include <linux/init.h>
0017 #include <linux/module.h>
0018 #include <linux/of_device.h>
0019 #include <linux/of_dma.h>
0020 #include <linux/pm_runtime.h>
0021 #include <linux/reset.h>
0022 #include <linux/slab.h>
0023 #include <linux/spinlock.h>
0024
/* Offset of the 32-bit channel configuration register (CCR) for channel x. */
#define STM32_DMAMUX_CCR(x) (0x4 * (x))
/* Cap on total DMA request outputs across all masters (size of the ccr[] snapshot). */
#define STM32_DMAMUX_MAX_DMA_REQUESTS 32
/* Default number of DMAMUX request inputs when DT omits "dma-requests". */
#define STM32_DMAMUX_MAX_REQUESTS 255
0028
/*
 * Per-channel routing cookie, handed out by stm32_dmamux_route_allocate()
 * as route_data and released in stm32_dmamux_free().
 */
struct stm32_dmamux {
	u32 master;	/* 0-based index into the "dma-masters" phandle list */
	u32 request;	/* DMAMUX request input programmed into the channel CCR */
	u32 chan_id;	/* global mux channel: bit in dma_inuse and CCR index */
};
0034
/* Driver-private state for one DMAMUX instance. */
struct stm32_dmamux_data {
	struct dma_router dmarouter;	/* registered via of_dma_router_register() */
	struct clk *clk;		/* DMAMUX interface clock */
	void __iomem *iomem;		/* mapped register base */
	u32 dma_requests;		/* total request outputs summed over all masters */
	u32 dmamux_requests;		/* number of DMAMUX request inputs */
	spinlock_t lock;		/* serializes dma_inuse allocation/release */
	unsigned long *dma_inuse;	/* bitmap of in-use mux output channels */
	u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS];	/* CCR snapshot for system suspend/resume */
	/*
	 * Flexible array filled in probe: dma_reqs[0] is the number of DMA
	 * masters; dma_reqs[1..n] hold each master's "dma-requests" count.
	 */
	u32 dma_reqs[];
};
0051
0052 static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
0053 {
0054 return readl_relaxed(iomem + reg);
0055 }
0056
0057 static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
0058 {
0059 writel_relaxed(val, iomem + reg);
0060 }
0061
/*
 * dma_router .route_free callback: release a channel handed out by
 * stm32_dmamux_route_allocate().
 *
 * Clears the channel's CCR (tearing down the request routing), frees the
 * channel in the dma_inuse bitmap and drops the runtime PM reference taken
 * at allocation time, then frees the routing cookie.
 */
static void stm32_dmamux_free(struct device *dev, void *route_data)
{
	struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
	struct stm32_dmamux *mux = route_data;
	unsigned long flags;

	/* Clear dma request */
	spin_lock_irqsave(&dmamux->lock, flags);

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
	clear_bit(mux->chan_id, dmamux->dma_inuse);

	/*
	 * NOTE(review): pm_runtime_put_sync() runs with the spinlock held;
	 * this is only safe if the runtime-suspend path (clk_disable_unprepare)
	 * cannot sleep for this clock — confirm.
	 */
	pm_runtime_put_sync(dev);

	spin_unlock_irqrestore(&dmamux->lock, flags);

	dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	kfree(mux);
}
0083
/*
 * dma_router .route_allocate callback.
 *
 * Consumer dma_spec has 3 cells: args[0] = DMAMUX request input,
 * args[1] and args[2] = channel configuration forwarded to the DMA master.
 *
 * Finds the first free mux output channel, determines which DMA master it
 * belongs to, programs the channel CCR with the requested input and
 * rewrites @dma_spec into the 4-cell form the master expects.
 *
 * Returns a stm32_dmamux cookie (released via stm32_dmamux_free()) or an
 * ERR_PTR on failure.
 */
static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct stm32_dmamux *mux;
	u32 i, min, max;
	int ret;
	unsigned long flags;

	if (dma_spec->args_count != 3) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] > dmamux->dmamux_requests) {
		dev_err(&pdev->dev, "invalid mux request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	/* Grab the first free mux output channel under the lock. */
	spin_lock_irqsave(&dmamux->lock, flags);
	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
					   dmamux->dma_requests);

	if (mux->chan_id == dmamux->dma_requests) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		ret = -ENOMEM;
		goto error_chan_id;
	}
	set_bit(mux->chan_id, dmamux->dma_inuse);
	spin_unlock_irqrestore(&dmamux->lock, flags);

	/*
	 * Map the global chan_id onto a master: dma_reqs[1..n] hold each
	 * master's request count, so walk the cumulative ranges until
	 * chan_id falls below the running upper bound "max".  After the
	 * loop, "min" is the first channel id owned by the matched master.
	 */
	for (i = 1, min = 0, max = dmamux->dma_reqs[i];
	     i <= dmamux->dma_reqs[0];
	     min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
		if (mux->chan_id < max)
			break;
	mux->master = i - 1;

	/* Point the consumer spec at the matched master's node. */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		ret = -EINVAL;
		goto error;
	}

	/* Set dma request */
	spin_lock_irqsave(&dmamux->lock, flags);
	/*
	 * NOTE(review): resume under the spinlock — same sleep-in-atomic
	 * caveat as in stm32_dmamux_free(); also dma_spec->np is not put on
	 * the error path below — confirm whether the OF-DMA core drops it.
	 */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		goto error;
	}
	spin_unlock_irqrestore(&dmamux->lock, flags);

	mux->request = dma_spec->args[0];

	/* Rewrite into the 4-cell master format: channel local to the
	 * master, then the original two config cells shifted up. */
	dma_spec->args[3] = dma_spec->args[2];
	dma_spec->args[2] = dma_spec->args[1];
	dma_spec->args[1] = 0;
	dma_spec->args[0] = mux->chan_id - min;
	dma_spec->args_count = 4;

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
			   mux->request);
	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	return mux;

error:
	clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
	kfree(mux);
	return ERR_PTR(ret);
}
0170
/* DMA masters supported behind this mux; each "dma-masters" phandle is
 * checked against this table in probe. */
static const struct of_device_id stm32_stm32dma_master_match[] __maybe_unused = {
	{ .compatible = "st,stm32-dma", },
	{},
};
0175
/*
 * Probe: parse the "dma-masters" list, size the channel bitmap, enable
 * clock/reset, zero all channel CCRs and register the DMA router.
 */
static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	struct resource *res;
	void __iomem *iomem;
	struct reset_control *rst;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	count = device_property_count_u32(&pdev->dev, "dma-masters");
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	/* Flexible array needs count + 1 entries: dma_reqs[0] is the count. */
	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;

	/* Validate each master and collect its request-output count. */
	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}

	/* Total must fit in the ccr[] suspend snapshot. */
	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
	stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
					       BITS_TO_LONGS(dma_req),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!stm32_dmamux->dma_inuse)
		return -ENOMEM;

	/* Number of mux request inputs; DT may omit it. */
	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}
	/*
	 * NOTE(review): this usage-count increment has no matching put in
	 * probe (a second get_noresume/put pair brackets the CCR reset
	 * below) and is also not dropped on the error paths — confirm the
	 * imbalance is intentional.
	 */
	pm_runtime_get_noresume(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
				     "Missing clock controller\n");

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	/* Reset line is optional; only defer on -EPROBE_DEFER.  The pulse
	 * is applied only with more than one master — TODO confirm why. */
	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto err_clk;
	} else if (count > 1) {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pm_runtime_get_noresume(&pdev->dev);

	/* Reset the dmamux: clear every channel's routing. */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	pm_runtime_put(&pdev->dev);

	ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
				     &stm32_dmamux->dmarouter);
	if (ret)
		goto pm_disable;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	/* fallthrough: disable the clock in both error cases */
err_clk:
	clk_disable_unprepare(stm32_dmamux->clk);

	return ret;
}
0306
0307 #ifdef CONFIG_PM
0308 static int stm32_dmamux_runtime_suspend(struct device *dev)
0309 {
0310 struct platform_device *pdev = to_platform_device(dev);
0311 struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
0312
0313 clk_disable_unprepare(stm32_dmamux->clk);
0314
0315 return 0;
0316 }
0317
0318 static int stm32_dmamux_runtime_resume(struct device *dev)
0319 {
0320 struct platform_device *pdev = to_platform_device(dev);
0321 struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
0322 int ret;
0323
0324 ret = clk_prepare_enable(stm32_dmamux->clk);
0325 if (ret) {
0326 dev_err(&pdev->dev, "failed to prepare_enable clock\n");
0327 return ret;
0328 }
0329
0330 return 0;
0331 }
0332 #endif
0333
0334 #ifdef CONFIG_PM_SLEEP
/*
 * System sleep: snapshot every channel's CCR into ccr[] so routing can be
 * restored on resume, then force the device into runtime suspend.
 */
static int stm32_dmamux_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int i, ret;

	/* Make sure the clock is on before touching the registers. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
							 STM32_DMAMUX_CCR(i));

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}
0355
/*
 * System resume: restore runtime PM state, then rewrite every channel CCR
 * from the snapshot taken in stm32_dmamux_suspend().
 */
static int stm32_dmamux_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int i, ret;

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	/* Make sure the clock is on before touching the registers. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
				   stm32_dmamux->ccr[i]);

	pm_runtime_put_sync(dev);

	return 0;
}
0378 #endif
0379
/* System-sleep and runtime PM callbacks (compiled out without CONFIG_PM*). */
static const struct dev_pm_ops stm32_dmamux_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
	SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
			   stm32_dmamux_runtime_resume, NULL)
};
0385
/* Device-tree match table for the DMAMUX itself. */
static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};
0390
/* Platform driver glue. */
static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
		.pm = &stm32_dmamux_pm_ops,
	},
};
0399
static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
/* arch_initcall: registered before regular device initcalls, presumably so
 * the router exists before DMA consumers probe — confirm. */
arch_initcall(stm32_dmamux_init);
0405
/* Module metadata. */
MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");