Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Generic on-chip SRAM allocation driver
0004  *
0005  * Copyright (C) 2012 Philipp Zabel, Pengutronix
0006  */
0007 
0008 #include <linux/clk.h>
0009 #include <linux/delay.h>
0010 #include <linux/genalloc.h>
0011 #include <linux/io.h>
0012 #include <linux/list_sort.h>
0013 #include <linux/of_address.h>
0014 #include <linux/of_device.h>
0015 #include <linux/platform_device.h>
0016 #include <linux/regmap.h>
0017 #include <linux/slab.h>
0018 #include <linux/mfd/syscon.h>
0019 #include <soc/at91/atmel-secumod.h>
0020 
0021 #include "sram.h"
0022 
0023 #define SRAM_GRANULARITY    32
0024 
0025 static ssize_t sram_read(struct file *filp, struct kobject *kobj,
0026              struct bin_attribute *attr,
0027              char *buf, loff_t pos, size_t count)
0028 {
0029     struct sram_partition *part;
0030 
0031     part = container_of(attr, struct sram_partition, battr);
0032 
0033     mutex_lock(&part->lock);
0034     memcpy_fromio(buf, part->base + pos, count);
0035     mutex_unlock(&part->lock);
0036 
0037     return count;
0038 }
0039 
0040 static ssize_t sram_write(struct file *filp, struct kobject *kobj,
0041               struct bin_attribute *attr,
0042               char *buf, loff_t pos, size_t count)
0043 {
0044     struct sram_partition *part;
0045 
0046     part = container_of(attr, struct sram_partition, battr);
0047 
0048     mutex_lock(&part->lock);
0049     memcpy_toio(part->base + pos, buf, count);
0050     mutex_unlock(&part->lock);
0051 
0052     return count;
0053 }
0054 
0055 static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
0056              phys_addr_t start, struct sram_partition *part)
0057 {
0058     int ret;
0059 
0060     part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
0061                       NUMA_NO_NODE, block->label);
0062     if (IS_ERR(part->pool))
0063         return PTR_ERR(part->pool);
0064 
0065     ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,
0066                 block->size, NUMA_NO_NODE);
0067     if (ret < 0) {
0068         dev_err(sram->dev, "failed to register subpool: %d\n", ret);
0069         return ret;
0070     }
0071 
0072     return 0;
0073 }
0074 
0075 static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
0076                phys_addr_t start, struct sram_partition *part)
0077 {
0078     sysfs_bin_attr_init(&part->battr);
0079     part->battr.attr.name = devm_kasprintf(sram->dev, GFP_KERNEL,
0080                            "%llx.sram",
0081                            (unsigned long long)start);
0082     if (!part->battr.attr.name)
0083         return -ENOMEM;
0084 
0085     part->battr.attr.mode = S_IRUSR | S_IWUSR;
0086     part->battr.read = sram_read;
0087     part->battr.write = sram_write;
0088     part->battr.size = block->size;
0089 
0090     return device_create_bin_file(sram->dev, &part->battr);
0091 }
0092 
0093 static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
0094                   phys_addr_t start)
0095 {
0096     int ret;
0097     struct sram_partition *part = &sram->partition[sram->partitions];
0098 
0099     mutex_init(&part->lock);
0100 
0101     if (sram->config && sram->config->map_only_reserved) {
0102         void __iomem *virt_base;
0103 
0104         if (sram->no_memory_wc)
0105             virt_base = devm_ioremap_resource(sram->dev, &block->res);
0106         else
0107             virt_base = devm_ioremap_resource_wc(sram->dev, &block->res);
0108 
0109         if (IS_ERR(virt_base)) {
0110             dev_err(sram->dev, "could not map SRAM at %pr\n", &block->res);
0111             return PTR_ERR(virt_base);
0112         }
0113 
0114         part->base = virt_base;
0115     } else {
0116         part->base = sram->virt_base + block->start;
0117     }
0118 
0119     if (block->pool) {
0120         ret = sram_add_pool(sram, block, start, part);
0121         if (ret)
0122             return ret;
0123     }
0124     if (block->export) {
0125         ret = sram_add_export(sram, block, start, part);
0126         if (ret)
0127             return ret;
0128     }
0129     if (block->protect_exec) {
0130         ret = sram_check_protect_exec(sram, block, part);
0131         if (ret)
0132             return ret;
0133 
0134         ret = sram_add_pool(sram, block, start, part);
0135         if (ret)
0136             return ret;
0137 
0138         sram_add_protect_exec(part);
0139     }
0140 
0141     sram->partitions++;
0142 
0143     return 0;
0144 }
0145 
0146 static void sram_free_partitions(struct sram_dev *sram)
0147 {
0148     struct sram_partition *part;
0149 
0150     if (!sram->partitions)
0151         return;
0152 
0153     part = &sram->partition[sram->partitions - 1];
0154     for (; sram->partitions; sram->partitions--, part--) {
0155         if (part->battr.size)
0156             device_remove_bin_file(sram->dev, &part->battr);
0157 
0158         if (part->pool &&
0159             gen_pool_avail(part->pool) < gen_pool_size(part->pool))
0160             dev_err(sram->dev, "removed pool while SRAM allocated\n");
0161     }
0162 }
0163 
0164 static int sram_reserve_cmp(void *priv, const struct list_head *a,
0165                     const struct list_head *b)
0166 {
0167     struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
0168     struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
0169 
0170     return ra->start - rb->start;
0171 }
0172 
/*
 * Parse the device's DT children describing reserved blocks inside the
 * SRAM resource @res, sort them by start offset, register a partition
 * (pool and/or sysfs export and/or protect-exec) for each flagged block,
 * and add every unreserved gap of the window to the device-wide pool
 * (sram->pool), when one exists.
 *
 * Returns 0 on success or a negative errno; on failure any partitions
 * created so far are torn down before returning.  Note the shared exit:
 * the success path falls through the err_chunks label, which only does
 * unconditional cleanup (of_node_put(NULL) is a no-op, rblocks is always
 * freed).
 */
static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
    struct device_node *np = sram->dev->of_node, *child;
    unsigned long size, cur_start, cur_size;
    struct sram_reserve *rblocks, *block;
    struct list_head reserve_list;
    unsigned int nblocks, exports = 0;
    const char *label;
    int ret = 0;

    INIT_LIST_HEAD(&reserve_list);

    size = resource_size(res);

    /*
     * We need an additional block to mark the end of the memory region
     * after the reserved blocks from the dt are processed.
     */
    nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
    rblocks = kcalloc(nblocks, sizeof(*rblocks), GFP_KERNEL);
    if (!rblocks)
        return -ENOMEM;

    block = &rblocks[0];
    for_each_available_child_of_node(np, child) {
        struct resource child_res;

        ret = of_address_to_resource(child, 0, &child_res);
        if (ret < 0) {
            dev_err(sram->dev,
                "could not get address for node %pOF\n",
                child);
            goto err_chunks;
        }

        /* A reserved block must lie entirely within the SRAM window. */
        if (child_res.start < res->start || child_res.end > res->end) {
            dev_err(sram->dev,
                "reserved block %pOF outside the sram area\n",
                child);
            ret = -EINVAL;
            goto err_chunks;
        }

        /* Record the block as an offset into the window, plus its
         * absolute resource for map_only_reserved configs. */
        block->start = child_res.start - res->start;
        block->size = resource_size(&child_res);
        block->res = child_res;
        list_add_tail(&block->list, &reserve_list);

        if (of_find_property(child, "export", NULL))
            block->export = true;

        if (of_find_property(child, "pool", NULL))
            block->pool = true;

        if (of_find_property(child, "protect-exec", NULL))
            block->protect_exec = true;

        /* Only flagged, non-empty blocks become partitions. */
        if ((block->export || block->pool || block->protect_exec) &&
            block->size) {
            exports++;

            /* "label" is optional; -EINVAL from the read means the
             * property is simply absent, so fall back to the node
             * name.  Any other error is a malformed property. */
            label = NULL;
            ret = of_property_read_string(child, "label", &label);
            if (ret && ret != -EINVAL) {
                dev_err(sram->dev,
                    "%pOF has invalid label name\n",
                    child);
                goto err_chunks;
            }
            if (!label)
                label = child->name;

            block->label = devm_kstrdup(sram->dev,
                            label, GFP_KERNEL);
            if (!block->label) {
                ret = -ENOMEM;
                goto err_chunks;
            }

            dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
                block->export ? "exported " : "", block->label,
                block->start, block->start + block->size);
        } else {
            dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
                block->start, block->start + block->size);
        }

        block++;
    }
    /* Loop completed: no child reference is held any more. */
    child = NULL;

    /* the last chunk marks the end of the region */
    rblocks[nblocks - 1].start = size;
    rblocks[nblocks - 1].size = 0;
    list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);

    list_sort(NULL, &reserve_list, sram_reserve_cmp);

    if (exports) {
        sram->partition = devm_kcalloc(sram->dev,
                       exports, sizeof(*sram->partition),
                       GFP_KERNEL);
        if (!sram->partition) {
            ret = -ENOMEM;
            goto err_chunks;
        }
    }

    /* Walk the sorted blocks, registering partitions and feeding the
     * gaps between them into the device-wide pool. */
    cur_start = 0;
    list_for_each_entry(block, &reserve_list, list) {
        /* can only happen if sections overlap */
        if (block->start < cur_start) {
            dev_err(sram->dev,
                "block at 0x%x starts after current offset 0x%lx\n",
                block->start, cur_start);
            ret = -EINVAL;
            sram_free_partitions(sram);
            goto err_chunks;
        }

        if ((block->export || block->pool || block->protect_exec) &&
            block->size) {
            ret = sram_add_partition(sram, block,
                         res->start + block->start);
            if (ret) {
                sram_free_partitions(sram);
                goto err_chunks;
            }
        }

        /* current start is in a reserved block, so continue after it */
        if (block->start == cur_start) {
            cur_start = block->start + block->size;
            continue;
        }

        /*
         * allocate the space between the current starting
         * address and the following reserved block, or the
         * end of the region.
         */
        cur_size = block->start - cur_start;

        if (sram->pool) {
            dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
                cur_start, cur_start + cur_size);

            ret = gen_pool_add_virt(sram->pool,
                    (unsigned long)sram->virt_base + cur_start,
                    res->start + cur_start, cur_size, -1);
            if (ret < 0) {
                sram_free_partitions(sram);
                goto err_chunks;
            }
        }

        /* next allocation after this reserved block */
        cur_start = block->start + block->size;
    }

err_chunks:
    of_node_put(child);
    kfree(rblocks);

    return ret;
}
0339 
/*
 * Poll the SAMA5D2 security module until it reports the secure SRAM
 * ready (RAMRDY bit set), sleeping 10 ms between reads with a 500 ms
 * overall timeout.  Returns 0 when ready, -ENODEV if the secumod
 * syscon is unavailable, or the poll-timeout error.
 */
static int atmel_securam_wait(void)
{
    struct regmap *regmap;
    u32 val;

    regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
    if (IS_ERR(regmap))
        return -ENODEV;

    return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
                    val & AT91_SECUMOD_RAMRDY_READY,
                    10000, 500000);
}
0353 
/* SECURAM must not be used before the security module flags it ready. */
static const struct sram_config atmel_securam_config = {
    .init = atmel_securam_wait,
};
0357 
/*
 * SYSRAM contains areas that are not accessible by the
 * kernel, such as the first 256K that is reserved for TZ.
 * Accesses to those areas (including speculative accesses)
 * trigger SErrors. As such we must map only the areas of
 * SYSRAM specified in the device tree.
 */
static const struct sram_config tegra_sysram_config = {
    .map_only_reserved = true,
};
0368 
/* DT match table; per-SoC quirks are carried in the .data config. */
static const struct of_device_id sram_dt_ids[] = {
    { .compatible = "mmio-sram" },
    { .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config },
    { .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config },
    { .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config },
    { .compatible = "nvidia,tegra234-sysram", .data = &tegra_sysram_config },
    {}
};
0377 
0378 static int sram_probe(struct platform_device *pdev)
0379 {
0380     const struct sram_config *config;
0381     struct sram_dev *sram;
0382     int ret;
0383     struct resource *res;
0384 
0385     config = of_device_get_match_data(&pdev->dev);
0386 
0387     sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
0388     if (!sram)
0389         return -ENOMEM;
0390 
0391     sram->dev = &pdev->dev;
0392     sram->no_memory_wc = of_property_read_bool(pdev->dev.of_node, "no-memory-wc");
0393     sram->config = config;
0394 
0395     if (!config || !config->map_only_reserved) {
0396         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0397         if (sram->no_memory_wc)
0398             sram->virt_base = devm_ioremap_resource(&pdev->dev, res);
0399         else
0400             sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res);
0401         if (IS_ERR(sram->virt_base)) {
0402             dev_err(&pdev->dev, "could not map SRAM registers\n");
0403             return PTR_ERR(sram->virt_base);
0404         }
0405 
0406         sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
0407                           NUMA_NO_NODE, NULL);
0408         if (IS_ERR(sram->pool))
0409             return PTR_ERR(sram->pool);
0410     }
0411 
0412     sram->clk = devm_clk_get(sram->dev, NULL);
0413     if (IS_ERR(sram->clk))
0414         sram->clk = NULL;
0415     else
0416         clk_prepare_enable(sram->clk);
0417 
0418     ret = sram_reserve_regions(sram,
0419             platform_get_resource(pdev, IORESOURCE_MEM, 0));
0420     if (ret)
0421         goto err_disable_clk;
0422 
0423     platform_set_drvdata(pdev, sram);
0424 
0425     if (config && config->init) {
0426         ret = config->init();
0427         if (ret)
0428             goto err_free_partitions;
0429     }
0430 
0431     if (sram->pool)
0432         dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
0433             gen_pool_size(sram->pool) / 1024, sram->virt_base);
0434 
0435     return 0;
0436 
0437 err_free_partitions:
0438     sram_free_partitions(sram);
0439 err_disable_clk:
0440     if (sram->clk)
0441         clk_disable_unprepare(sram->clk);
0442 
0443     return ret;
0444 }
0445 
0446 static int sram_remove(struct platform_device *pdev)
0447 {
0448     struct sram_dev *sram = platform_get_drvdata(pdev);
0449 
0450     sram_free_partitions(sram);
0451 
0452     if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
0453         dev_err(sram->dev, "removed while SRAM allocated\n");
0454 
0455     if (sram->clk)
0456         clk_disable_unprepare(sram->clk);
0457 
0458     return 0;
0459 }
0460 
/* No .remove ordering constraints beyond what sram_remove() handles. */
static struct platform_driver sram_driver = {
    .driver = {
        .name = "sram",
        .of_match_table = sram_dt_ids,
    },
    .probe = sram_probe,
    .remove = sram_remove,
};
0469 
static int __init sram_init(void)
{
    return platform_driver_register(&sram_driver);
}

/* postcore: SRAM pools must exist before most consumers probe. */
postcore_initcall(sram_init);