0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/device.h>
0011 #include <linux/err.h>
0012 #include <linux/module.h>
0013 #include <linux/mutex.h>
0014 #include <linux/slab.h>
0015 #include <linux/of.h>
0016 #include <linux/of_dma.h>
0017
0018 #include "dmaengine.h"
0019
0020 static LIST_HEAD(of_dma_list);
0021 static DEFINE_MUTEX(of_dma_lock);
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032 static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
0033 {
0034 struct of_dma *ofdma;
0035
0036 list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
0037 if (ofdma->of_node == dma_spec->np)
0038 return ofdma;
0039
0040 pr_debug("%s: can't find DMA controller %pOF\n", __func__,
0041 dma_spec->np);
0042
0043 return NULL;
0044 }
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
/*
 * of_dma_router_xlate - translation function for DMA routers
 * @dma_spec:	original phandle arguments from the consumer's "dmas" entry
 * @ofdma:	the router's of_dma registration (set up by
 *		of_dma_router_register())
 *
 * Asks the router to allocate a route and rewrite the dma_spec so it points
 * at the real DMA controller, then forwards the translation to that
 * controller's own xlate callback.
 *
 * Returns the channel from the target controller, NULL if route allocation
 * failed, ERR_PTR(-EPROBE_DEFER) if the target controller is not (yet)
 * registered, or an ERR_PTR from device_router_config().
 */
static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct dma_chan		*chan;
	struct of_dma		*ofdma_target;
	struct of_phandle_args	dma_spec_target;
	void			*route_data;

	/* Work on a copy: route_allocate rewrites it to target the real
	 * DMA controller. */
	memcpy(&dma_spec_target, dma_spec, sizeof(dma_spec_target));
	route_data = ofdma->of_dma_route_allocate(&dma_spec_target, ofdma);
	if (IS_ERR(route_data))
		return NULL;

	ofdma_target = of_dma_find_controller(&dma_spec_target);
	if (!ofdma_target) {
		/* Target controller not registered yet: release the route
		 * and let the consumer retry via probe deferral. */
		ofdma->dma_router->route_free(ofdma->dma_router->dev,
					      route_data);
		chan = ERR_PTR(-EPROBE_DEFER);
		goto err;
	}

	chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
	if (IS_ERR_OR_NULL(chan)) {
		/* Translation failed, the route is no longer needed. */
		ofdma->dma_router->route_free(ofdma->dma_router->dev,
					      route_data);
	} else {
		int ret = 0;

		/* Attach the route so it is freed when the channel is
		 * released. */
		chan->router = ofdma->dma_router;
		chan->route_data = route_data;

		/* Optional hook for devices that must program the routed
		 * channel after the route is set up. */
		if (chan->device->device_router_config)
			ret = chan->device->device_router_config(chan);

		if (ret) {
			dma_release_channel(chan);
			chan = ERR_PTR(ret);
		}
	}

err:
	/*
	 * Drop the node reference held in dma_spec_target.np;
	 * of_dma_route_allocate() is expected to have taken one when it
	 * re-targeted the spec at the real controller.
	 */
	of_node_put(dma_spec_target.np);
	return chan;
}
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118 int of_dma_controller_register(struct device_node *np,
0119 struct dma_chan *(*of_dma_xlate)
0120 (struct of_phandle_args *, struct of_dma *),
0121 void *data)
0122 {
0123 struct of_dma *ofdma;
0124
0125 if (!np || !of_dma_xlate) {
0126 pr_err("%s: not enough information provided\n", __func__);
0127 return -EINVAL;
0128 }
0129
0130 ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
0131 if (!ofdma)
0132 return -ENOMEM;
0133
0134 ofdma->of_node = np;
0135 ofdma->of_dma_xlate = of_dma_xlate;
0136 ofdma->of_dma_data = data;
0137
0138
0139 mutex_lock(&of_dma_lock);
0140 list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
0141 mutex_unlock(&of_dma_lock);
0142
0143 return 0;
0144 }
0145 EXPORT_SYMBOL_GPL(of_dma_controller_register);
0146
0147
0148
0149
0150
0151
0152
0153 void of_dma_controller_free(struct device_node *np)
0154 {
0155 struct of_dma *ofdma;
0156
0157 mutex_lock(&of_dma_lock);
0158
0159 list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
0160 if (ofdma->of_node == np) {
0161 list_del(&ofdma->of_dma_controllers);
0162 kfree(ofdma);
0163 break;
0164 }
0165
0166 mutex_unlock(&of_dma_lock);
0167 }
0168 EXPORT_SYMBOL_GPL(of_dma_controller_free);
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185 int of_dma_router_register(struct device_node *np,
0186 void *(*of_dma_route_allocate)
0187 (struct of_phandle_args *, struct of_dma *),
0188 struct dma_router *dma_router)
0189 {
0190 struct of_dma *ofdma;
0191
0192 if (!np || !of_dma_route_allocate || !dma_router) {
0193 pr_err("%s: not enough information provided\n", __func__);
0194 return -EINVAL;
0195 }
0196
0197 ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
0198 if (!ofdma)
0199 return -ENOMEM;
0200
0201 ofdma->of_node = np;
0202 ofdma->of_dma_xlate = of_dma_router_xlate;
0203 ofdma->of_dma_route_allocate = of_dma_route_allocate;
0204 ofdma->dma_router = dma_router;
0205
0206
0207 mutex_lock(&of_dma_lock);
0208 list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
0209 mutex_unlock(&of_dma_lock);
0210
0211 return 0;
0212 }
0213 EXPORT_SYMBOL_GPL(of_dma_router_register);
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226 static int of_dma_match_channel(struct device_node *np, const char *name,
0227 int index, struct of_phandle_args *dma_spec)
0228 {
0229 const char *s;
0230
0231 if (of_property_read_string_index(np, "dma-names", index, &s))
0232 return -ENODEV;
0233
0234 if (strcmp(name, s))
0235 return -ENODEV;
0236
0237 if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
0238 dma_spec))
0239 return -ENODEV;
0240
0241 return 0;
0242 }
0243
0244
0245
0246
0247
0248
0249
0250
/*
 * of_dma_request_slave_channel - Request a named DMA channel for a device
 * @np:		consumer device node (holds "dmas" / "dma-names")
 * @name:	name of the requested channel, matched against "dma-names"
 *
 * Walks every "dma-names" entry matching @name, translates the paired
 * "dmas" phandle via the registered controller's xlate and returns the
 * first channel obtained.
 *
 * Returns a valid channel, ERR_PTR(-ENODEV) when the properties are
 * missing or no entry yields a channel, or ERR_PTR(-EPROBE_DEFER) when a
 * matching controller is not registered yet.
 */
struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
					      const char *name)
{
	struct of_phandle_args	dma_spec;
	struct of_dma		*ofdma;
	struct dma_chan		*chan;
	int			count, i, start;
	int			ret_no_channel = -ENODEV;
	/* Shared across all callers: rotates the starting index below */
	static atomic_t		last_index;

	if (!np || !name) {
		pr_err("%s: not enough information provided\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	/* Silently fail if there is not even the "dmas" property */
	if (!of_find_property(np, "dmas", NULL))
		return ERR_PTR(-ENODEV);

	count = of_property_count_strings(np, "dma-names");
	if (count < 0) {
		pr_err("%s: dma-names property of node '%pOF' missing or empty\n",
			__func__, np);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Start from a rotating offset so that repeated requests do not
	 * always pick the first matching "dmas" entry — spreads load when
	 * several entries share the same name.
	 */
	start = atomic_inc_return(&last_index);
	for (i = 0; i < count; i++) {
		if (of_dma_match_channel(np, name,
					 (i + start) % count,
					 &dma_spec))
			continue;

		/* of_dma_lock protects the list lookup and the xlate call */
		mutex_lock(&of_dma_lock);
		ofdma = of_dma_find_controller(&dma_spec);

		if (ofdma) {
			chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
		} else {
			/* Controller may simply not have probed yet */
			ret_no_channel = -EPROBE_DEFER;
			chan = NULL;
		}

		mutex_unlock(&of_dma_lock);

		/* Drop the node reference taken by of_parse_phandle_with_args() */
		of_node_put(dma_spec.np);

		if (chan)
			return chan;
	}

	return ERR_PTR(ret_no_channel);
}
EXPORT_SYMBOL_GPL(of_dma_request_slave_channel);
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321 struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
0322 struct of_dma *ofdma)
0323 {
0324 int count = dma_spec->args_count;
0325 struct of_dma_filter_info *info = ofdma->of_dma_data;
0326
0327 if (!info || !info->filter_fn)
0328 return NULL;
0329
0330 if (count != 1)
0331 return NULL;
0332
0333 return __dma_request_channel(&info->dma_cap, info->filter_fn,
0334 &dma_spec->args[0], dma_spec->np);
0335 }
0336 EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351 struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
0352 struct of_dma *ofdma)
0353 {
0354 struct dma_device *dev = ofdma->of_dma_data;
0355 struct dma_chan *chan, *candidate = NULL;
0356
0357 if (!dev || dma_spec->args_count != 1)
0358 return NULL;
0359
0360 list_for_each_entry(chan, &dev->channels, device_node)
0361 if (chan->chan_id == dma_spec->args[0]) {
0362 candidate = chan;
0363 break;
0364 }
0365
0366 if (!candidate)
0367 return NULL;
0368
0369 return dma_get_slave_channel(candidate);
0370 }
0371 EXPORT_SYMBOL_GPL(of_dma_xlate_by_chan_id);