/* fs/pstore/ram_core.c — extracted from an LXR code-browser page; browser chrome removed. */
0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2012 Google, Inc.
0004  */
0005 
0006 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0007 
0008 #include <linux/device.h>
0009 #include <linux/err.h>
0010 #include <linux/errno.h>
0011 #include <linux/init.h>
0012 #include <linux/io.h>
0013 #include <linux/kernel.h>
0014 #include <linux/list.h>
0015 #include <linux/memblock.h>
0016 #include <linux/pstore_ram.h>
0017 #include <linux/rslib.h>
0018 #include <linux/slab.h>
0019 #include <linux/uaccess.h>
0020 #include <linux/vmalloc.h>
0021 #include <asm/page.h>
0022 
0023 /**
0024  * struct persistent_ram_buffer - persistent circular RAM buffer
0025  *
0026  * @sig:
0027  *  signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
0028  * @start:
0029  *  offset into @data where the beginning of the stored bytes begin
0030  * @size:
0031  *  number of valid bytes stored in @data
0032  */
0033 struct persistent_ram_buffer {
0034     uint32_t    sig;
0035     atomic_t    start;
0036     atomic_t    size;
0037     uint8_t     data[];
0038 };
0039 
0040 #define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
0041 
0042 static inline size_t buffer_size(struct persistent_ram_zone *prz)
0043 {
0044     return atomic_read(&prz->buffer->size);
0045 }
0046 
0047 static inline size_t buffer_start(struct persistent_ram_zone *prz)
0048 {
0049     return atomic_read(&prz->buffer->start);
0050 }
0051 
0052 /* increase and wrap the start pointer, returning the old value */
0053 static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
0054 {
0055     int old;
0056     int new;
0057     unsigned long flags = 0;
0058 
0059     if (!(prz->flags & PRZ_FLAG_NO_LOCK))
0060         raw_spin_lock_irqsave(&prz->buffer_lock, flags);
0061 
0062     old = atomic_read(&prz->buffer->start);
0063     new = old + a;
0064     while (unlikely(new >= prz->buffer_size))
0065         new -= prz->buffer_size;
0066     atomic_set(&prz->buffer->start, new);
0067 
0068     if (!(prz->flags & PRZ_FLAG_NO_LOCK))
0069         raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
0070 
0071     return old;
0072 }
0073 
0074 /* increase the size counter until it hits the max size */
0075 static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
0076 {
0077     size_t old;
0078     size_t new;
0079     unsigned long flags = 0;
0080 
0081     if (!(prz->flags & PRZ_FLAG_NO_LOCK))
0082         raw_spin_lock_irqsave(&prz->buffer_lock, flags);
0083 
0084     old = atomic_read(&prz->buffer->size);
0085     if (old == prz->buffer_size)
0086         goto exit;
0087 
0088     new = old + a;
0089     if (new > prz->buffer_size)
0090         new = prz->buffer_size;
0091     atomic_set(&prz->buffer->size, new);
0092 
0093 exit:
0094     if (!(prz->flags & PRZ_FLAG_NO_LOCK))
0095         raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
0096 }
0097 
0098 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
0099     uint8_t *data, size_t len, uint8_t *ecc)
0100 {
0101     int i;
0102 
0103     /* Initialize the parity buffer */
0104     memset(prz->ecc_info.par, 0,
0105            prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
0106     encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
0107     for (i = 0; i < prz->ecc_info.ecc_size; i++)
0108         ecc[i] = prz->ecc_info.par[i];
0109 }
0110 
0111 static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
0112     void *data, size_t len, uint8_t *ecc)
0113 {
0114     int i;
0115 
0116     for (i = 0; i < prz->ecc_info.ecc_size; i++)
0117         prz->ecc_info.par[i] = ecc[i];
0118     return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
0119                 NULL, 0, NULL, 0, NULL);
0120 }
0121 
0122 static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
0123     unsigned int start, unsigned int count)
0124 {
0125     struct persistent_ram_buffer *buffer = prz->buffer;
0126     uint8_t *buffer_end = buffer->data + prz->buffer_size;
0127     uint8_t *block;
0128     uint8_t *par;
0129     int ecc_block_size = prz->ecc_info.block_size;
0130     int ecc_size = prz->ecc_info.ecc_size;
0131     int size = ecc_block_size;
0132 
0133     if (!ecc_size)
0134         return;
0135 
0136     block = buffer->data + (start & ~(ecc_block_size - 1));
0137     par = prz->par_buffer + (start / ecc_block_size) * ecc_size;
0138 
0139     do {
0140         if (block + ecc_block_size > buffer_end)
0141             size = buffer_end - block;
0142         persistent_ram_encode_rs8(prz, block, size, par);
0143         block += ecc_block_size;
0144         par += ecc_size;
0145     } while (block < buffer->data + start + count);
0146 }
0147 
0148 static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
0149 {
0150     struct persistent_ram_buffer *buffer = prz->buffer;
0151 
0152     if (!prz->ecc_info.ecc_size)
0153         return;
0154 
0155     persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
0156                   prz->par_header);
0157 }
0158 
0159 static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
0160 {
0161     struct persistent_ram_buffer *buffer = prz->buffer;
0162     uint8_t *block;
0163     uint8_t *par;
0164 
0165     if (!prz->ecc_info.ecc_size)
0166         return;
0167 
0168     block = buffer->data;
0169     par = prz->par_buffer;
0170     while (block < buffer->data + buffer_size(prz)) {
0171         int numerr;
0172         int size = prz->ecc_info.block_size;
0173         if (block + size > buffer->data + prz->buffer_size)
0174             size = buffer->data + prz->buffer_size - block;
0175         numerr = persistent_ram_decode_rs8(prz, block, size, par);
0176         if (numerr > 0) {
0177             pr_devel("error in block %p, %d\n", block, numerr);
0178             prz->corrected_bytes += numerr;
0179         } else if (numerr < 0) {
0180             pr_devel("uncorrectable error in block %p\n", block);
0181             prz->bad_blocks++;
0182         }
0183         block += prz->ecc_info.block_size;
0184         par += prz->ecc_info.ecc_size;
0185     }
0186 }
0187 
0188 static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
0189                    struct persistent_ram_ecc_info *ecc_info)
0190 {
0191     int numerr;
0192     struct persistent_ram_buffer *buffer = prz->buffer;
0193     int ecc_blocks;
0194     size_t ecc_total;
0195 
0196     if (!ecc_info || !ecc_info->ecc_size)
0197         return 0;
0198 
0199     prz->ecc_info.block_size = ecc_info->block_size ?: 128;
0200     prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
0201     prz->ecc_info.symsize = ecc_info->symsize ?: 8;
0202     prz->ecc_info.poly = ecc_info->poly ?: 0x11d;
0203 
0204     ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
0205                   prz->ecc_info.block_size +
0206                   prz->ecc_info.ecc_size);
0207     ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
0208     if (ecc_total >= prz->buffer_size) {
0209         pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
0210                __func__, prz->ecc_info.ecc_size,
0211                ecc_total, prz->buffer_size);
0212         return -EINVAL;
0213     }
0214 
0215     prz->buffer_size -= ecc_total;
0216     prz->par_buffer = buffer->data + prz->buffer_size;
0217     prz->par_header = prz->par_buffer +
0218               ecc_blocks * prz->ecc_info.ecc_size;
0219 
0220     /*
0221      * first consecutive root is 0
0222      * primitive element to generate roots = 1
0223      */
0224     prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
0225                   0, 1, prz->ecc_info.ecc_size);
0226     if (prz->rs_decoder == NULL) {
0227         pr_info("init_rs failed\n");
0228         return -EINVAL;
0229     }
0230 
0231     /* allocate workspace instead of using stack VLA */
0232     prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
0233                       sizeof(*prz->ecc_info.par),
0234                       GFP_KERNEL);
0235     if (!prz->ecc_info.par) {
0236         pr_err("cannot allocate ECC parity workspace\n");
0237         return -ENOMEM;
0238     }
0239 
0240     prz->corrected_bytes = 0;
0241     prz->bad_blocks = 0;
0242 
0243     numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
0244                        prz->par_header);
0245     if (numerr > 0) {
0246         pr_info("error in header, %d\n", numerr);
0247         prz->corrected_bytes += numerr;
0248     } else if (numerr < 0) {
0249         pr_info_ratelimited("uncorrectable error in header\n");
0250         prz->bad_blocks++;
0251     }
0252 
0253     return 0;
0254 }
0255 
0256 ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
0257     char *str, size_t len)
0258 {
0259     ssize_t ret;
0260 
0261     if (!prz->ecc_info.ecc_size)
0262         return 0;
0263 
0264     if (prz->corrected_bytes || prz->bad_blocks)
0265         ret = snprintf(str, len, ""
0266             "\nECC: %d Corrected bytes, %d unrecoverable blocks\n",
0267             prz->corrected_bytes, prz->bad_blocks);
0268     else
0269         ret = snprintf(str, len, "\nECC: No errors detected\n");
0270 
0271     return ret;
0272 }
0273 
0274 static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
0275     const void *s, unsigned int start, unsigned int count)
0276 {
0277     struct persistent_ram_buffer *buffer = prz->buffer;
0278     memcpy_toio(buffer->data + start, s, count);
0279     persistent_ram_update_ecc(prz, start, count);
0280 }
0281 
0282 static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
0283     const void __user *s, unsigned int start, unsigned int count)
0284 {
0285     struct persistent_ram_buffer *buffer = prz->buffer;
0286     int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ?
0287         -EFAULT : 0;
0288     persistent_ram_update_ecc(prz, start, count);
0289     return ret;
0290 }
0291 
0292 void persistent_ram_save_old(struct persistent_ram_zone *prz)
0293 {
0294     struct persistent_ram_buffer *buffer = prz->buffer;
0295     size_t size = buffer_size(prz);
0296     size_t start = buffer_start(prz);
0297 
0298     if (!size)
0299         return;
0300 
0301     if (!prz->old_log) {
0302         persistent_ram_ecc_old(prz);
0303         prz->old_log = kmalloc(size, GFP_KERNEL);
0304     }
0305     if (!prz->old_log) {
0306         pr_err("failed to allocate buffer\n");
0307         return;
0308     }
0309 
0310     prz->old_log_size = size;
0311     memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
0312     memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
0313 }
0314 
0315 int notrace persistent_ram_write(struct persistent_ram_zone *prz,
0316     const void *s, unsigned int count)
0317 {
0318     int rem;
0319     int c = count;
0320     size_t start;
0321 
0322     if (unlikely(c > prz->buffer_size)) {
0323         s += c - prz->buffer_size;
0324         c = prz->buffer_size;
0325     }
0326 
0327     buffer_size_add(prz, c);
0328 
0329     start = buffer_start_add(prz, c);
0330 
0331     rem = prz->buffer_size - start;
0332     if (unlikely(rem < c)) {
0333         persistent_ram_update(prz, s, start, rem);
0334         s += rem;
0335         c -= rem;
0336         start = 0;
0337     }
0338     persistent_ram_update(prz, s, start, c);
0339 
0340     persistent_ram_update_header_ecc(prz);
0341 
0342     return count;
0343 }
0344 
0345 int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
0346     const void __user *s, unsigned int count)
0347 {
0348     int rem, ret = 0, c = count;
0349     size_t start;
0350 
0351     if (unlikely(c > prz->buffer_size)) {
0352         s += c - prz->buffer_size;
0353         c = prz->buffer_size;
0354     }
0355 
0356     buffer_size_add(prz, c);
0357 
0358     start = buffer_start_add(prz, c);
0359 
0360     rem = prz->buffer_size - start;
0361     if (unlikely(rem < c)) {
0362         ret = persistent_ram_update_user(prz, s, start, rem);
0363         s += rem;
0364         c -= rem;
0365         start = 0;
0366     }
0367     if (likely(!ret))
0368         ret = persistent_ram_update_user(prz, s, start, c);
0369 
0370     persistent_ram_update_header_ecc(prz);
0371 
0372     return unlikely(ret) ? ret : count;
0373 }
0374 
0375 size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
0376 {
0377     return prz->old_log_size;
0378 }
0379 
0380 void *persistent_ram_old(struct persistent_ram_zone *prz)
0381 {
0382     return prz->old_log;
0383 }
0384 
0385 void persistent_ram_free_old(struct persistent_ram_zone *prz)
0386 {
0387     kfree(prz->old_log);
0388     prz->old_log = NULL;
0389     prz->old_log_size = 0;
0390 }
0391 
0392 void persistent_ram_zap(struct persistent_ram_zone *prz)
0393 {
0394     atomic_set(&prz->buffer->start, 0);
0395     atomic_set(&prz->buffer->size, 0);
0396     persistent_ram_update_header_ecc(prz);
0397 }
0398 
0399 #define MEM_TYPE_WCOMBINE   0
0400 #define MEM_TYPE_NONCACHED  1
0401 #define MEM_TYPE_NORMAL     2
0402 
0403 static void *persistent_ram_vmap(phys_addr_t start, size_t size,
0404         unsigned int memtype)
0405 {
0406     struct page **pages;
0407     phys_addr_t page_start;
0408     unsigned int page_count;
0409     pgprot_t prot;
0410     unsigned int i;
0411     void *vaddr;
0412 
0413     page_start = start - offset_in_page(start);
0414     page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
0415 
0416     switch (memtype) {
0417     case MEM_TYPE_NORMAL:
0418         prot = PAGE_KERNEL;
0419         break;
0420     case MEM_TYPE_NONCACHED:
0421         prot = pgprot_noncached(PAGE_KERNEL);
0422         break;
0423     case MEM_TYPE_WCOMBINE:
0424         prot = pgprot_writecombine(PAGE_KERNEL);
0425         break;
0426     default:
0427         pr_err("invalid mem_type=%d\n", memtype);
0428         return NULL;
0429     }
0430 
0431     pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
0432     if (!pages) {
0433         pr_err("%s: Failed to allocate array for %u pages\n",
0434                __func__, page_count);
0435         return NULL;
0436     }
0437 
0438     for (i = 0; i < page_count; i++) {
0439         phys_addr_t addr = page_start + i * PAGE_SIZE;
0440         pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
0441     }
0442     vaddr = vmap(pages, page_count, VM_MAP, prot);
0443     kfree(pages);
0444 
0445     /*
0446      * Since vmap() uses page granularity, we must add the offset
0447      * into the page here, to get the byte granularity address
0448      * into the mapping to represent the actual "start" location.
0449      */
0450     return vaddr + offset_in_page(start);
0451 }
0452 
0453 static void *persistent_ram_iomap(phys_addr_t start, size_t size,
0454         unsigned int memtype, char *label)
0455 {
0456     void *va;
0457 
0458     if (!request_mem_region(start, size, label ?: "ramoops")) {
0459         pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
0460             label ?: "ramoops",
0461             (unsigned long long)size, (unsigned long long)start);
0462         return NULL;
0463     }
0464 
0465     if (memtype)
0466         va = ioremap(start, size);
0467     else
0468         va = ioremap_wc(start, size);
0469 
0470     /*
0471      * Since request_mem_region() and ioremap() are byte-granularity
0472      * there is no need handle anything special like we do when the
0473      * vmap() case in persistent_ram_vmap() above.
0474      */
0475     return va;
0476 }
0477 
0478 static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
0479         struct persistent_ram_zone *prz, int memtype)
0480 {
0481     prz->paddr = start;
0482     prz->size = size;
0483 
0484     if (pfn_valid(start >> PAGE_SHIFT))
0485         prz->vaddr = persistent_ram_vmap(start, size, memtype);
0486     else
0487         prz->vaddr = persistent_ram_iomap(start, size, memtype,
0488                           prz->label);
0489 
0490     if (!prz->vaddr) {
0491         pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
0492             (unsigned long long)size, (unsigned long long)start);
0493         return -ENOMEM;
0494     }
0495 
0496     prz->buffer = prz->vaddr;
0497     prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
0498 
0499     return 0;
0500 }
0501 
0502 static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
0503                     struct persistent_ram_ecc_info *ecc_info)
0504 {
0505     int ret;
0506     bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);
0507 
0508     ret = persistent_ram_init_ecc(prz, ecc_info);
0509     if (ret) {
0510         pr_warn("ECC failed %s\n", prz->label);
0511         return ret;
0512     }
0513 
0514     sig ^= PERSISTENT_RAM_SIG;
0515 
0516     if (prz->buffer->sig == sig) {
0517         if (buffer_size(prz) == 0) {
0518             pr_debug("found existing empty buffer\n");
0519             return 0;
0520         }
0521 
0522         if (buffer_size(prz) > prz->buffer_size ||
0523             buffer_start(prz) > buffer_size(prz)) {
0524             pr_info("found existing invalid buffer, size %zu, start %zu\n",
0525                 buffer_size(prz), buffer_start(prz));
0526             zap = true;
0527         } else {
0528             pr_debug("found existing buffer, size %zu, start %zu\n",
0529                  buffer_size(prz), buffer_start(prz));
0530             persistent_ram_save_old(prz);
0531         }
0532     } else {
0533         pr_debug("no valid data in buffer (sig = 0x%08x)\n",
0534              prz->buffer->sig);
0535         prz->buffer->sig = sig;
0536         zap = true;
0537     }
0538 
0539     /* Reset missing, invalid, or single-use memory area. */
0540     if (zap)
0541         persistent_ram_zap(prz);
0542 
0543     return 0;
0544 }
0545 
0546 void persistent_ram_free(struct persistent_ram_zone *prz)
0547 {
0548     if (!prz)
0549         return;
0550 
0551     if (prz->vaddr) {
0552         if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
0553             /* We must vunmap() at page-granularity. */
0554             vunmap(prz->vaddr - offset_in_page(prz->paddr));
0555         } else {
0556             iounmap(prz->vaddr);
0557             release_mem_region(prz->paddr, prz->size);
0558         }
0559         prz->vaddr = NULL;
0560     }
0561     if (prz->rs_decoder) {
0562         free_rs(prz->rs_decoder);
0563         prz->rs_decoder = NULL;
0564     }
0565     kfree(prz->ecc_info.par);
0566     prz->ecc_info.par = NULL;
0567 
0568     persistent_ram_free_old(prz);
0569     kfree(prz->label);
0570     kfree(prz);
0571 }
0572 
0573 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
0574             u32 sig, struct persistent_ram_ecc_info *ecc_info,
0575             unsigned int memtype, u32 flags, char *label)
0576 {
0577     struct persistent_ram_zone *prz;
0578     int ret = -ENOMEM;
0579 
0580     prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
0581     if (!prz) {
0582         pr_err("failed to allocate persistent ram zone\n");
0583         goto err;
0584     }
0585 
0586     /* Initialize general buffer state. */
0587     raw_spin_lock_init(&prz->buffer_lock);
0588     prz->flags = flags;
0589     prz->label = kstrdup(label, GFP_KERNEL);
0590 
0591     ret = persistent_ram_buffer_map(start, size, prz, memtype);
0592     if (ret)
0593         goto err;
0594 
0595     ret = persistent_ram_post_init(prz, sig, ecc_info);
0596     if (ret)
0597         goto err;
0598 
0599     pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
0600         prz->label, prz->size, (unsigned long long)prz->paddr,
0601         sizeof(*prz->buffer), prz->buffer_size,
0602         prz->size - sizeof(*prz->buffer) - prz->buffer_size,
0603         prz->ecc_info.ecc_size, prz->ecc_info.block_size);
0604 
0605     return prz;
0606 err:
0607     persistent_ram_free(prz);
0608     return ERR_PTR(ret);
0609 }