// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
// Author: Vignesh Raghavendra <vigneshr@ti.com>

#include <linux/completion.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/hyperbus.h>
#include <linux/mtd/mtd.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/types.h>

#define AM654_HBMC_CALIB_COUNT	25

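/**
 * struct am654_hbmc_device_priv - per-flash-device private data
 * @rx_dma_complete: completion signalled when a DMA read finishes
 * @device_base: physical base address of the memory-mapped flash window
 * @ctlr: HyperBus controller this device sits on
 * @rx_chan: memcpy-capable DMA channel for reads, NULL if none was found
 */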
struct am654_hbmc_device_priv {
	struct completion rx_dma_complete;
	phys_addr_t device_base;
	struct hyperbus_ctlr *ctlr;
	struct dma_chan *rx_chan;
};

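/**
 * struct am654_hbmc_priv - controller private data
 * @ctlr: HyperBus controller handle registered with the MTD HyperBus core
 * @hbdev: the single HyperBus device hanging off this controller
 * @mux_ctrl: optional mux routing the flash interface to the HBMC, NULL if absent
 */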
struct am654_hbmc_priv {
	struct hyperbus_ctlr ctlr;
	struct hyperbus_device hbdev;
	struct mux_control *mux_ctrl;
};

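/*
 * Calibrate the read path by putting the flash into CFI query mode (0xF0
 * reset, then 0x98 written at address 0x55) and polling for the "QRY"
 * signature. Reads are deemed stable once cfi_qry_present() succeeds on
 * five consecutive attempts within AM654_HBMC_CALIB_COUNT tries.
 */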
static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
{
	struct map_info *map = &hbdev->map;
	struct cfi_private cfi;
	int count = AM654_HBMC_CALIB_COUNT;
	int pass_count = 0;
	int ret;

	cfi.interleave = 1;
	cfi.device_type = CFI_DEVICETYPE_X16;
	cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL);

	while (count--) {
		ret = cfi_qry_present(map, 0, &cfi);
		if (ret)
			pass_count++;
		else
			pass_count = 0;
		if (pass_count == 5)
			break;
	}

	cfi_qry_mode_off(0, map, &cfi);

	return ret;
}

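/* DMA completion callback: wakes the reader waiting in am654_hbmc_dma_read(). */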
static void am654_hbmc_dma_callback(void *param)
{
	struct am654_hbmc_device_priv *priv = param;

	complete(&priv->rx_dma_complete);
}

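/*
 * Read @len bytes from the flash window at offset @from into @to with a
 * single memcpy DMA transfer. Buffers that cannot be DMA-mapped (on the
 * stack or outside the kernel linear map) are rejected with -EINVAL so
 * the caller can fall back to PIO.
 */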
static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
			       unsigned long from, ssize_t len)
{
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_chan *rx_chan = priv->rx_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dst, dma_src;
	dma_cookie_t cookie;
	int ret;

	if (!rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
		return -EINVAL;

	dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
		dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
		return -EIO;
	}

	dma_src = priv->device_base + from;
	tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto unmap_dma;
	}

	reinit_completion(&priv->rx_dma_complete);
	tx->callback = am654_hbmc_dma_callback;
	tx->callback_param = priv;
	cookie = dmaengine_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
		goto unmap_dma;
	}

	dma_async_issue_pending(rx_chan);
	if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
		dmaengine_terminate_sync(rx_chan);
		dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
	}

unmap_dma:
	dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
	return ret;
}

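/*
 * ->copy_from() hook: try DMA for reads of at least 1 KiB and fall back
 * to memcpy_fromio() for short transfers or whenever the DMA path fails.
 */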
static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
			    unsigned long from, ssize_t len)
{
	struct am654_hbmc_device_priv *priv = hbdev->priv;

	if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
		memcpy_fromio(to, hbdev->map.virt + from, len);
}

static const struct hyperbus_ops am654_hbmc_ops = {
	.calibrate = am654_hbmc_calibrate,
	.copy_from = am654_hbmc_read,
};

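/*
 * Grab any free memcpy-capable DMA channel for reads. Failing to find one
 * is not fatal (reads then go through PIO), but -EPROBE_DEFER is propagated
 * so probing can be retried once a DMA provider shows up.
 */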
static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
{
	struct dma_chan *rx_chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(rx_chan)) {
		if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
		return 0;
	}
	priv->rx_chan = rx_chan;
	init_completion(&priv->rx_dma_complete);

	return 0;
}

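/*
 * Probe: map the flash window described by the first child node, select
 * the optional HBMC mux, set up the DMA read path and register the
 * HyperBus device with the MTD core.
 */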
static int am654_hbmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct am654_hbmc_device_priv *dev_priv;
	struct device *dev = &pdev->dev;
	struct am654_hbmc_priv *priv;
	struct resource res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	priv->hbdev.np = of_get_next_child(np, NULL);
	ret = of_address_to_resource(priv->hbdev.np, 0, &res);
	if (ret)
		return ret;

	if (of_property_read_bool(dev->of_node, "mux-controls")) {
		struct mux_control *control = devm_mux_control_get(dev, NULL);

		if (IS_ERR(control))
			return PTR_ERR(control);

		ret = mux_control_select(control, 1);
		if (ret) {
			dev_err(dev, "Failed to select HBMC mux\n");
			return ret;
		}
		priv->mux_ctrl = control;
	}

	priv->hbdev.map.size = resource_size(&res);
	priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
	if (IS_ERR(priv->hbdev.map.virt)) {
		ret = PTR_ERR(priv->hbdev.map.virt);
		goto disable_mux;
	}

	priv->ctlr.dev = dev;
	priv->ctlr.ops = &am654_hbmc_ops;
	priv->hbdev.ctlr = &priv->ctlr;

	dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv) {
		ret = -ENOMEM;
		goto disable_mux;
	}

	priv->hbdev.priv = dev_priv;
	dev_priv->device_base = res.start;
	dev_priv->ctlr = &priv->ctlr;

	ret = am654_hbmc_request_mmap_dma(dev_priv);
	if (ret)
		goto disable_mux;

	ret = hyperbus_register_device(&priv->hbdev);
	if (ret) {
		dev_err(dev, "failed to register controller\n");
		goto release_dma;
	}

	return 0;
release_dma:
	if (dev_priv->rx_chan)
		dma_release_channel(dev_priv->rx_chan);
disable_mux:
	if (priv->mux_ctrl)
		mux_control_deselect(priv->mux_ctrl);
	return ret;
}

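/* Undo probe: unregister the device, deselect the mux, release the DMA channel. */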
static int am654_hbmc_remove(struct platform_device *pdev)
{
	struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
	struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;

	hyperbus_unregister_device(&priv->hbdev);

	if (priv->mux_ctrl)
		mux_control_deselect(priv->mux_ctrl);

	if (dev_priv->rx_chan)
		dma_release_channel(dev_priv->rx_chan);

	return 0;
}

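/*
 * Illustrative (not authoritative) device-tree node this driver binds to;
 * the addresses, ranges and mux phandle below are placeholders modelled on
 * a typical AM654 configuration, not requirements of the driver:
 *
 *	hbmc: hyperbus@47034000 {
 *		compatible = "ti,am654-hbmc";
 *		reg = <0x0 0x47034000 0x0 0x100>;
 *		#address-cells = <2>;
 *		#size-cells = <1>;
 *		ranges = <0x0 0x0 0x5 0x00000000 0x4000000>;
 *		mux-controls = <&hbmc_mux 0>;
 *
 *		flash@0,0 {
 *			compatible = "cypress,hyperflash", "cfi-flash";
 *			reg = <0x0 0x0 0x4000000>;
 *		};
 *	};
 *
 * The first child node ("flash@0,0") is what probe() looks up with
 * of_get_next_child() and maps via of_address_to_resource().
 */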
static const struct of_device_id am654_hbmc_dt_ids[] = {
	{
		.compatible = "ti,am654-hbmc",
	},
	{ }
};

MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);

static struct platform_driver am654_hbmc_platform_driver = {
	.probe = am654_hbmc_probe,
	.remove = am654_hbmc_remove,
	.driver = {
		.name = "hbmc-am654",
		.of_match_table = am654_hbmc_dt_ids,
	},
};

module_platform_driver(am654_hbmc_platform_driver);

MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hbmc-am654");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");