0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Persistent Storage - platform driver interface parts.
0004  *
0005  * Copyright (C) 2007-2008 Google, Inc.
0006  * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
0007  */
0008 
0009 #define pr_fmt(fmt) "pstore: " fmt
0010 
0011 #include <linux/atomic.h>
0012 #include <linux/types.h>
0013 #include <linux/errno.h>
0014 #include <linux/init.h>
0015 #include <linux/kmsg_dump.h>
0016 #include <linux/console.h>
0017 #include <linux/module.h>
0018 #include <linux/pstore.h>
0019 #if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
0020 #include <linux/lzo.h>
0021 #endif
0022 #if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
0023 #include <linux/lz4.h>
0024 #endif
0025 #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
0026 #include <linux/zstd.h>
0027 #endif
0028 #include <linux/crypto.h>
0029 #include <linux/string.h>
0030 #include <linux/timer.h>
0031 #include <linux/slab.h>
0032 #include <linux/uaccess.h>
0033 #include <linux/jiffies.h>
0034 #include <linux/workqueue.h>
0035 
0036 #include "internal.h"
0037 
0038 /*
0039  * We defer making "oops" entries appear in pstore until we can
0040  * check whether the system is still running well enough for
0041  * someone to see the entry.
0042  */
0043 static int pstore_update_ms = -1;
0044 module_param_named(update_ms, pstore_update_ms, int, 0600);
0045 MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
0046          "(default is -1, which means runtime updates are disabled; "
0047          "enabling this option may not be safe; it may lead to further "
0048          "corruption on Oopses)");
0049 
0050 /* Names should be in the same order as the enum pstore_type_id */
0051 static const char * const pstore_type_names[] = {
0052     "dmesg",
0053     "mce",
0054     "console",
0055     "ftrace",
0056     "rtas",
0057     "powerpc-ofw",
0058     "powerpc-common",
0059     "pmsg",
0060     "powerpc-opal",
0061 };
0062 
0063 static int pstore_new_entry;
0064 
0065 static void pstore_timefunc(struct timer_list *);
0066 static DEFINE_TIMER(pstore_timer, pstore_timefunc);
0067 
0068 static void pstore_dowork(struct work_struct *);
0069 static DECLARE_WORK(pstore_work, pstore_dowork);
0070 
0071 /*
0072  * psinfo_lock protects "psinfo" during calls to
0073  * pstore_register(), pstore_unregister(), and
0074  * the filesystem mount/unmount routines.
0075  */
0076 static DEFINE_MUTEX(psinfo_lock);
0077 struct pstore_info *psinfo;
0078 
0079 static char *backend;
0080 module_param(backend, charp, 0444);
0081 MODULE_PARM_DESC(backend, "specific backend to use");
0082 
0083 static char *compress =
0084 #ifdef CONFIG_PSTORE_COMPRESS_DEFAULT
0085         CONFIG_PSTORE_COMPRESS_DEFAULT;
0086 #else
0087         NULL;
0088 #endif
0089 module_param(compress, charp, 0444);
0090 MODULE_PARM_DESC(compress, "compression to use");
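
/*
 * Illustrative note (not used by the code in this file): "backend",
 * "compress" and "update_ms" are module parameters, so when pstore is
 * built in they can also be set on the kernel command line, e.g.
 *
 *	pstore.backend=ramoops pstore.compress=deflate
 *
 * "ramoops" is only an example value; the string must match the
 * pstore_info::name of whichever backend registers below.
 */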
0091 
0092 /* Compression parameters */
0093 static struct crypto_comp *tfm;
0094 
0095 struct pstore_zbackend {
0096     int (*zbufsize)(size_t size);
0097     const char *name;
0098 };
0099 
0100 static char *big_oops_buf;
0101 static size_t big_oops_buf_sz;
0102 
0103 /* How much of the console log to snapshot */
0104 unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
0105 
0106 void pstore_set_kmsg_bytes(int bytes)
0107 {
0108     kmsg_bytes = bytes;
0109 }
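
/*
 * Illustrative note: pstore_set_kmsg_bytes() is called from the pstore
 * filesystem code when the "kmsg_bytes" mount option is parsed, so an
 * administrator can adjust the snapshot size with something like
 *
 *	mount -o remount,kmsg_bytes=16384 /sys/fs/pstore
 *
 * (the mount point and value above are only examples).
 */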
0110 
0111 /* Tag each group of saved records with a sequence number */
0112 static int  oopscount;
0113 
0114 const char *pstore_type_to_name(enum pstore_type_id type)
0115 {
0116     BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);
0117 
0118     if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
0119         return "unknown";
0120 
0121     return pstore_type_names[type];
0122 }
0123 EXPORT_SYMBOL_GPL(pstore_type_to_name);
0124 
0125 enum pstore_type_id pstore_name_to_type(const char *name)
0126 {
0127     int i;
0128 
0129     for (i = 0; i < PSTORE_TYPE_MAX; i++) {
0130         if (!strcmp(pstore_type_names[i], name))
0131             return i;
0132     }
0133 
0134     return PSTORE_TYPE_MAX;
0135 }
0136 EXPORT_SYMBOL_GPL(pstore_name_to_type);
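
/*
 * Illustrative sketch (not part of this file's logic): the two helpers
 * above are inverses for valid names, so a caller parsing a user-supplied
 * record name might do:
 *
 *	enum pstore_type_id type = pstore_name_to_type("dmesg");
 *
 *	if (type == PSTORE_TYPE_MAX)
 *		return -EINVAL;		// not a known record type
 *	pr_debug("record type: %s\n", pstore_type_to_name(type));
 */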
0137 
0138 static void pstore_timer_kick(void)
0139 {
0140     if (pstore_update_ms < 0)
0141         return;
0142 
0143     mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
0144 }
0145 
0146 static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
0147 {
0148     /*
0149      * In case of NMI path, pstore shouldn't be blocked
0150      * regardless of reason.
0151      */
0152     if (in_nmi())
0153         return true;
0154 
0155     switch (reason) {
0156     /* In panic case, other cpus are stopped by smp_send_stop(). */
0157     case KMSG_DUMP_PANIC:
0158     /*
0159      * Emergency restart shouldn't be blocked by spinning on
0160      * pstore_info::buf_lock.
0161      */
0162     case KMSG_DUMP_EMERG:
0163         return true;
0164     default:
0165         return false;
0166     }
0167 }
0168 
0169 #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
0170 static int zbufsize_deflate(size_t size)
0171 {
0172     size_t cmpr;
0173 
0174     switch (size) {
0175     /* buffer range for efivars */
0176     case 1000 ... 2000:
0177         cmpr = 56;
0178         break;
0179     case 2001 ... 3000:
0180         cmpr = 54;
0181         break;
0182     case 3001 ... 3999:
0183         cmpr = 52;
0184         break;
0185     /* buffer range for nvram, erst */
0186     case 4000 ... 10000:
0187         cmpr = 45;
0188         break;
0189     default:
0190         cmpr = 60;
0191         break;
0192     }
0193 
0194     return (size * 100) / cmpr;
0195 }
0196 #endif
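
/*
 * Worked example for the table above (numbers are illustrative): a
 * backend with a 4096-byte record buffer falls in the "nvram, erst"
 * range, so cmpr = 45 and the staging buffer is sized
 * (4096 * 100) / 45 = 9102 bytes, i.e. roughly 2.2x the record size,
 * on the assumption that dmesg text compresses to about 45% or less.
 */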
0197 
0198 #if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
0199 static int zbufsize_lzo(size_t size)
0200 {
0201     return lzo1x_worst_compress(size);
0202 }
0203 #endif
0204 
0205 #if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
0206 static int zbufsize_lz4(size_t size)
0207 {
0208     return LZ4_compressBound(size);
0209 }
0210 #endif
0211 
0212 #if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
0213 static int zbufsize_842(size_t size)
0214 {
0215     return size;
0216 }
0217 #endif
0218 
0219 #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
0220 static int zbufsize_zstd(size_t size)
0221 {
0222     return zstd_compress_bound(size);
0223 }
0224 #endif
0225 
0226 static const struct pstore_zbackend *zbackend __ro_after_init;
0227 
0228 static const struct pstore_zbackend zbackends[] = {
0229 #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
0230     {
0231         .zbufsize   = zbufsize_deflate,
0232         .name       = "deflate",
0233     },
0234 #endif
0235 #if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
0236     {
0237         .zbufsize   = zbufsize_lzo,
0238         .name       = "lzo",
0239     },
0240 #endif
0241 #if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS)
0242     {
0243         .zbufsize   = zbufsize_lz4,
0244         .name       = "lz4",
0245     },
0246 #endif
0247 #if IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
0248     {
0249         .zbufsize   = zbufsize_lz4,
0250         .name       = "lz4hc",
0251     },
0252 #endif
0253 #if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
0254     {
0255         .zbufsize   = zbufsize_842,
0256         .name       = "842",
0257     },
0258 #endif
0259 #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
0260     {
0261         .zbufsize   = zbufsize_zstd,
0262         .name       = "zstd",
0263     },
0264 #endif
0265     { }
0266 };
0267 
0268 static int pstore_compress(const void *in, void *out,
0269                unsigned int inlen, unsigned int outlen)
0270 {
0271     int ret;
0272 
0273     if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
0274         return -EINVAL;
0275 
0276     ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
0277     if (ret) {
0278         pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
0279         return ret;
0280     }
0281 
0282     return outlen;
0283 }
0284 
0285 static void allocate_buf_for_compression(void)
0286 {
0287     struct crypto_comp *ctx;
0288     int size;
0289     char *buf;
0290 
0291     /* Skip if not built-in or compression backend not selected yet. */
0292     if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
0293         return;
0294 
0295     /* Skip if no pstore backend yet or compression init already done. */
0296     if (!psinfo || tfm)
0297         return;
0298 
0299     if (!crypto_has_comp(zbackend->name, 0, 0)) {
0300         pr_err("Unknown compression: %s\n", zbackend->name);
0301         return;
0302     }
0303 
0304     size = zbackend->zbufsize(psinfo->bufsize);
0305     if (size <= 0) {
0306         pr_err("Invalid compression size for %s: %d\n",
0307                zbackend->name, size);
0308         return;
0309     }
0310 
0311     buf = kmalloc(size, GFP_KERNEL);
0312     if (!buf) {
0313         pr_err("Failed %d byte compression buffer allocation for: %s\n",
0314                size, zbackend->name);
0315         return;
0316     }
0317 
0318     ctx = crypto_alloc_comp(zbackend->name, 0, 0);
0319     if (IS_ERR_OR_NULL(ctx)) {
0320         kfree(buf);
0321         pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
0322                PTR_ERR(ctx));
0323         return;
0324     }
0325 
0326     /* A non-NULL big_oops_buf indicates compression is available. */
0327     tfm = ctx;
0328     big_oops_buf_sz = size;
0329     big_oops_buf = buf;
0330 
0331     pr_info("Using crash dump compression: %s\n", zbackend->name);
0332 }
0333 
0334 static void free_buf_for_compression(void)
0335 {
0336     if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
0337         crypto_free_comp(tfm);
0338         tfm = NULL;
0339     }
0340     kfree(big_oops_buf);
0341     big_oops_buf = NULL;
0342     big_oops_buf_sz = 0;
0343 }
0344 
0345 /*
0346  * Called when compression fails. The printk buffer has already been
0347  * fetched into big_oops_buf for compression, and that fetch consumed
0348  * the newest messages from the dump iterator, so fetching again would
0349  * only return older contents. Instead, copy the most recent messages
0350  * from big_oops_buf into psinfo->buf.
0351  */
0352 static size_t copy_kmsg_to_buffer(int hsize, size_t len)
0353 {
0354     size_t total_len;
0355     size_t diff;
0356 
0357     total_len = hsize + len;
0358 
0359     if (total_len > psinfo->bufsize) {
0360         diff = total_len - psinfo->bufsize + hsize;
0361         memcpy(psinfo->buf, big_oops_buf, hsize);
0362         memcpy(psinfo->buf + hsize, big_oops_buf + diff,
0363                     psinfo->bufsize - hsize);
0364         total_len = psinfo->bufsize;
0365     } else
0366         memcpy(psinfo->buf, big_oops_buf, total_len);
0367 
0368     return total_len;
0369 }
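
/*
 * Worked example of the truncation above (illustrative numbers): with
 * psinfo->bufsize = 4096, hsize = 32 and len = 8000, total_len = 8032
 * exceeds the buffer, so diff = 8032 - 4096 + 32 = 3968. The 32-byte
 * header is copied first, followed by the 4064 bytes starting at
 * big_oops_buf + 3968, which are exactly the newest 4064 bytes of the
 * dump text, so the header plus the most recent messages are kept.
 */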
0370 
0371 void pstore_record_init(struct pstore_record *record,
0372             struct pstore_info *psinfo)
0373 {
0374     memset(record, 0, sizeof(*record));
0375 
0376     record->psi = psinfo;
0377 
0378     /* Report zeroed timestamp if called before timekeeping has resumed. */
0379     record->time = ns_to_timespec64(ktime_get_real_fast_ns());
0380 }
0381 
0382 /*
0383  * Callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the
0384  * end of the buffer.
0385  */
0386 static void pstore_dump(struct kmsg_dumper *dumper,
0387             enum kmsg_dump_reason reason)
0388 {
0389     struct kmsg_dump_iter iter;
0390     unsigned long   total = 0;
0391     const char  *why;
0392     unsigned int    part = 1;
0393     unsigned long   flags = 0;
0394     int     ret;
0395 
0396     why = kmsg_dump_reason_str(reason);
0397 
0398     if (pstore_cannot_block_path(reason)) {
0399         if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
0400             pr_err("dump skipped in %s path because of concurrent dump\n",
0401                     in_nmi() ? "NMI" : why);
0402             return;
0403         }
0404     } else {
0405         spin_lock_irqsave(&psinfo->buf_lock, flags);
0406     }
0407 
0408     kmsg_dump_rewind(&iter);
0409 
0410     oopscount++;
0411     while (total < kmsg_bytes) {
0412         char *dst;
0413         size_t dst_size;
0414         int header_size;
0415         int zipped_len = -1;
0416         size_t dump_size;
0417         struct pstore_record record;
0418 
0419         pstore_record_init(&record, psinfo);
0420         record.type = PSTORE_TYPE_DMESG;
0421         record.count = oopscount;
0422         record.reason = reason;
0423         record.part = part;
0424         record.buf = psinfo->buf;
0425 
0426         if (big_oops_buf) {
0427             dst = big_oops_buf;
0428             dst_size = big_oops_buf_sz;
0429         } else {
0430             dst = psinfo->buf;
0431             dst_size = psinfo->bufsize;
0432         }
0433 
0434         /* Write dump header. */
0435         header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
0436                  oopscount, part);
0437         dst_size -= header_size;
0438 
0439         /* Write dump contents. */
0440         if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
0441                       dst_size, &dump_size))
0442             break;
0443 
0444         if (big_oops_buf) {
0445             zipped_len = pstore_compress(dst, psinfo->buf,
0446                         header_size + dump_size,
0447                         psinfo->bufsize);
0448 
0449             if (zipped_len > 0) {
0450                 record.compressed = true;
0451                 record.size = zipped_len;
0452             } else {
0453                 record.size = copy_kmsg_to_buffer(header_size,
0454                                   dump_size);
0455             }
0456         } else {
0457             record.size = header_size + dump_size;
0458         }
0459 
0460         ret = psinfo->write(&record);
0461         if (ret == 0 && reason == KMSG_DUMP_OOPS) {
0462             pstore_new_entry = 1;
0463             pstore_timer_kick();
0464         }
0465 
0466         total += record.size;
0467         part++;
0468     }
0469     spin_unlock_irqrestore(&psinfo->buf_lock, flags);
0470 }
0471 
0472 static struct kmsg_dumper pstore_dumper = {
0473     .dump = pstore_dump,
0474 };
0475 
0476 /*
0477  * Register with kmsg_dump to save the last part of the console log on panic.
0478  */
0479 static void pstore_register_kmsg(void)
0480 {
0481     kmsg_dump_register(&pstore_dumper);
0482 }
0483 
0484 static void pstore_unregister_kmsg(void)
0485 {
0486     kmsg_dump_unregister(&pstore_dumper);
0487 }
0488 
0489 #ifdef CONFIG_PSTORE_CONSOLE
0490 static void pstore_console_write(struct console *con, const char *s, unsigned c)
0491 {
0492     struct pstore_record record;
0493 
0494     if (!c)
0495         return;
0496 
0497     pstore_record_init(&record, psinfo);
0498     record.type = PSTORE_TYPE_CONSOLE;
0499 
0500     record.buf = (char *)s;
0501     record.size = c;
0502     psinfo->write(&record);
0503 }
0504 
0505 static struct console pstore_console = {
0506     .write  = pstore_console_write,
0507     .index  = -1,
0508 };
0509 
0510 static void pstore_register_console(void)
0511 {
0512     /* Show which backend is going to get console writes. */
0513     strscpy(pstore_console.name, psinfo->name,
0514         sizeof(pstore_console.name));
0515     /*
0516      * Always initialize flags here since prior unregister_console()
0517      * calls may have changed settings (specifically CON_ENABLED).
0518      */
0519     pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME;
0520     register_console(&pstore_console);
0521 }
0522 
0523 static void pstore_unregister_console(void)
0524 {
0525     unregister_console(&pstore_console);
0526 }
0527 #else
0528 static void pstore_register_console(void) {}
0529 static void pstore_unregister_console(void) {}
0530 #endif
0531 
0532 static int pstore_write_user_compat(struct pstore_record *record,
0533                     const char __user *buf)
0534 {
0535     int ret = 0;
0536 
0537     if (record->buf)
0538         return -EINVAL;
0539 
0540     record->buf = memdup_user(buf, record->size);
0541     if (IS_ERR(record->buf)) {
0542         ret = PTR_ERR(record->buf);
0543         goto out;
0544     }
0545 
0546     ret = record->psi->write(record);
0547 
0548     kfree(record->buf);
0549 out:
0550     record->buf = NULL;
0551 
0552     return unlikely(ret < 0) ? ret : record->size;
0553 }
0554 
0555 /*
0556  * A platform-specific persistent storage driver registers with
0557  * us here. If pstore is already mounted, call the platform
0558  * read function right away to populate the filesystem. If not,
0559  * the pstore mount code will call us later to fill out the
0560  * filesystem.
0561  */
0562 int pstore_register(struct pstore_info *psi)
0563 {
0564     if (backend && strcmp(backend, psi->name)) {
0565         pr_warn("ignoring unexpected backend '%s'\n", psi->name);
0566         return -EPERM;
0567     }
0568 
0569     /* Sanity check flags. */
0570     if (!psi->flags) {
0571         pr_warn("backend '%s' must support at least one frontend\n",
0572             psi->name);
0573         return -EINVAL;
0574     }
0575 
0576     /* Check for required functions. */
0577     if (!psi->read || !psi->write) {
0578         pr_warn("backend '%s' must implement read() and write()\n",
0579             psi->name);
0580         return -EINVAL;
0581     }
0582 
0583     mutex_lock(&psinfo_lock);
0584     if (psinfo) {
0585         pr_warn("backend '%s' already loaded: ignoring '%s'\n",
0586             psinfo->name, psi->name);
0587         mutex_unlock(&psinfo_lock);
0588         return -EBUSY;
0589     }
0590 
0591     if (!psi->write_user)
0592         psi->write_user = pstore_write_user_compat;
0593     psinfo = psi;
0594     mutex_init(&psinfo->read_mutex);
0595     spin_lock_init(&psinfo->buf_lock);
0596 
0597     if (psi->flags & PSTORE_FLAGS_DMESG)
0598         allocate_buf_for_compression();
0599 
0600     pstore_get_records(0);
0601 
0602     if (psi->flags & PSTORE_FLAGS_DMESG) {
0603         pstore_dumper.max_reason = psinfo->max_reason;
0604         pstore_register_kmsg();
0605     }
0606     if (psi->flags & PSTORE_FLAGS_CONSOLE)
0607         pstore_register_console();
0608     if (psi->flags & PSTORE_FLAGS_FTRACE)
0609         pstore_register_ftrace();
0610     if (psi->flags & PSTORE_FLAGS_PMSG)
0611         pstore_register_pmsg();
0612 
0613     /* Start watching for new records, if desired. */
0614     pstore_timer_kick();
0615 
0616     /*
0617      * Update the module parameter backend, so it is visible
0618      * through /sys/module/pstore/parameters/backend
0619      */
0620     backend = kstrdup(psi->name, GFP_KERNEL);
0621 
0622     pr_info("Registered %s as persistent store backend\n", psi->name);
0623 
0624     mutex_unlock(&psinfo_lock);
0625     return 0;
0626 }
0627 EXPORT_SYMBOL_GPL(pstore_register);
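
/*
 * Illustrative sketch of a registration (hypothetical "example" backend,
 * not part of this file): a platform driver typically fills in a static
 * struct pstore_info and hands it to pstore_register() from its probe
 * path, e.g.
 *
 *	static char example_buf[4096];
 *
 *	static struct pstore_info example_psi = {
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *		.flags		= PSTORE_FLAGS_DMESG,
 *		.max_reason	= KMSG_DUMP_OOPS,
 *		.buf		= example_buf,
 *		.bufsize	= sizeof(example_buf),
 *		.read		= example_read,
 *		.write		= example_write,
 *		.erase		= example_erase,
 *	};
 *
 *	err = pstore_register(&example_psi);
 *
 * read() and write() are mandatory (checked above); write_user() is
 * optional and falls back to pstore_write_user_compat().
 */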
0628 
0629 void pstore_unregister(struct pstore_info *psi)
0630 {
0631     /* It's okay to unregister nothing. */
0632     if (!psi)
0633         return;
0634 
0635     mutex_lock(&psinfo_lock);
0636 
0637     /* Only one backend can be registered at a time. */
0638     if (WARN_ON(psi != psinfo)) {
0639         mutex_unlock(&psinfo_lock);
0640         return;
0641     }
0642 
0643     /* Unregister all callbacks. */
0644     if (psi->flags & PSTORE_FLAGS_PMSG)
0645         pstore_unregister_pmsg();
0646     if (psi->flags & PSTORE_FLAGS_FTRACE)
0647         pstore_unregister_ftrace();
0648     if (psi->flags & PSTORE_FLAGS_CONSOLE)
0649         pstore_unregister_console();
0650     if (psi->flags & PSTORE_FLAGS_DMESG)
0651         pstore_unregister_kmsg();
0652 
0653     /* Stop timer and make sure all work has finished. */
0654     del_timer_sync(&pstore_timer);
0655     flush_work(&pstore_work);
0656 
0657     /* Remove all backend records from filesystem tree. */
0658     pstore_put_backend_records(psi);
0659 
0660     free_buf_for_compression();
0661 
0662     psinfo = NULL;
0663     kfree(backend);
0664     backend = NULL;
0665     mutex_unlock(&psinfo_lock);
0666 }
0667 EXPORT_SYMBOL_GPL(pstore_unregister);
0668 
0669 static void decompress_record(struct pstore_record *record)
0670 {
0671     int ret;
0672     int unzipped_len;
0673     char *unzipped, *workspace;
0674 
0675     if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
0676         return;
0677 
0678     /* Only PSTORE_TYPE_DMESG supports compression. */
0679     if (record->type != PSTORE_TYPE_DMESG) {
0680         pr_warn("ignored compressed record type %d\n", record->type);
0681         return;
0682     }
0683 
0684     /* Missing compression buffer means compression was not initialized. */
0685     if (!big_oops_buf) {
0686         pr_warn("no decompression method initialized!\n");
0687         return;
0688     }
0689 
0690     /* Allocate enough space to hold max decompression and ECC. */
0691     unzipped_len = big_oops_buf_sz;
0692     workspace = kmalloc(unzipped_len + record->ecc_notice_size,
0693                 GFP_KERNEL);
0694     if (!workspace)
0695         return;
0696 
0697     /* After decompression "unzipped_len" is almost certainly smaller. */
0698     ret = crypto_comp_decompress(tfm, record->buf, record->size,
0699                       workspace, &unzipped_len);
0700     if (ret) {
0701         pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
0702         kfree(workspace);
0703         return;
0704     }
0705 
0706     /* Append ECC notice to decompressed buffer. */
0707     memcpy(workspace + unzipped_len, record->buf + record->size,
0708            record->ecc_notice_size);
0709 
0710     /* Copy decompressed contents into a minimum-sized allocation. */
0711     unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
0712                GFP_KERNEL);
0713     kfree(workspace);
0714     if (!unzipped)
0715         return;
0716 
0717     /* Swap out compressed contents with decompressed contents. */
0718     kfree(record->buf);
0719     record->buf = unzipped;
0720     record->size = unzipped_len;
0721     record->compressed = false;
0722 }
0723 
0724 /*
0725  * Read all the records from one persistent store backend. Create
0726  * files in our filesystem.  Don't warn about -EEXIST errors
0727  * when we are re-scanning the backing store looking to add new
0728  * error records.
0729  */
0730 void pstore_get_backend_records(struct pstore_info *psi,
0731                 struct dentry *root, int quiet)
0732 {
0733     int failed = 0;
0734     unsigned int stop_loop = 65536;
0735 
0736     if (!psi || !root)
0737         return;
0738 
0739     mutex_lock(&psi->read_mutex);
0740     if (psi->open && psi->open(psi))
0741         goto out;
0742 
0743     /*
0744      * Backend callback read() allocates record.buf. decompress_record()
0745      * may reallocate record.buf. On success, pstore_mkfile() will keep
0746      * the record.buf, so free it only on failure.
0747      */
0748     for (; stop_loop; stop_loop--) {
0749         struct pstore_record *record;
0750         int rc;
0751 
0752         record = kzalloc(sizeof(*record), GFP_KERNEL);
0753         if (!record) {
0754             pr_err("out of memory creating record\n");
0755             break;
0756         }
0757         pstore_record_init(record, psi);
0758 
0759         record->size = psi->read(record);
0760 
0761         /* No more records left in backend? */
0762         if (record->size <= 0) {
0763             kfree(record);
0764             break;
0765         }
0766 
0767         decompress_record(record);
0768         rc = pstore_mkfile(root, record);
0769         if (rc) {
0770             /* pstore_mkfile() did not take record, so free it. */
0771             kfree(record->buf);
0772             kfree(record->priv);
0773             kfree(record);
0774             if (rc != -EEXIST || !quiet)
0775                 failed++;
0776         }
0777     }
0778     if (psi->close)
0779         psi->close(psi);
0780 out:
0781     mutex_unlock(&psi->read_mutex);
0782 
0783     if (failed)
0784         pr_warn("failed to create %d record(s) from '%s'\n",
0785             failed, psi->name);
0786     if (!stop_loop)
0787         pr_err("looping? Too many records seen from '%s'\n",
0788             psi->name);
0789 }
0790 
0791 static void pstore_dowork(struct work_struct *work)
0792 {
0793     pstore_get_records(1);
0794 }
0795 
0796 static void pstore_timefunc(struct timer_list *unused)
0797 {
0798     if (pstore_new_entry) {
0799         pstore_new_entry = 0;
0800         schedule_work(&pstore_work);
0801     }
0802 
0803     pstore_timer_kick();
0804 }
0805 
0806 static void __init pstore_choose_compression(void)
0807 {
0808     const struct pstore_zbackend *step;
0809 
0810     if (!compress)
0811         return;
0812 
0813     for (step = zbackends; step->name; step++) {
0814         if (!strcmp(compress, step->name)) {
0815             zbackend = step;
0816             return;
0817         }
0818     }
0819 }
0820 
0821 static int __init pstore_init(void)
0822 {
0823     int ret;
0824 
0825     pstore_choose_compression();
0826 
0827     /*
0828      * Check if any pstore backends registered earlier but did not
0829      * initialize compression because crypto was not ready. If so,
0830      * initialize compression now.
0831      */
0832     allocate_buf_for_compression();
0833 
0834     ret = pstore_init_fs();
0835     if (ret)
0836         free_buf_for_compression();
0837 
0838     return ret;
0839 }
0840 late_initcall(pstore_init);
0841 
0842 static void __exit pstore_exit(void)
0843 {
0844     pstore_exit_fs();
0845 }
0846 module_exit(pstore_exit)
0847 
0848 MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
0849 MODULE_LICENSE("GPL");