// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

void badrange_init(struct badrange *badrange)
{
    INIT_LIST_HEAD(&badrange->list);
    spin_lock_init(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_init);
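
/*
 * Usage sketch (illustrative, not part of the original file): a bus
 * provider embeds a struct badrange in its private state and calls
 * badrange_init() once during setup. The "my_bus" type and function
 * below are hypothetical.
 *
 *	struct my_bus {
 *		struct badrange badrange;
 *	};
 *
 *	static void my_bus_setup(struct my_bus *bus)
 *	{
 *		badrange_init(&bus->badrange);
 *	}
 */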

static void append_badrange_entry(struct badrange *badrange,
        struct badrange_entry *bre, u64 addr, u64 length)
{
    lockdep_assert_held(&badrange->lock);
    bre->start = addr;
    bre->length = length;
    list_add_tail(&bre->list, &badrange->list);
}

static int alloc_and_append_badrange_entry(struct badrange *badrange,
        u64 addr, u64 length, gfp_t flags)
{
    struct badrange_entry *bre;

    bre = kzalloc(sizeof(*bre), flags);
    if (!bre)
        return -ENOMEM;

    append_badrange_entry(badrange, bre, addr, length);
    return 0;
}

static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
{
    struct badrange_entry *bre, *bre_new;

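    /*
     * Entered with badrange->lock held (see badrange_add()). The lock
     * is dropped around the allocation because GFP_KERNEL may sleep,
     * which is not allowed under a spinlock. The list may change while
     * the lock is released, so the empty-list and duplicate checks
     * below run after it is retaken.
     */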
    spin_unlock(&badrange->lock);
    bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
    spin_lock(&badrange->lock);

    if (list_empty(&badrange->list)) {
        if (!bre_new)
            return -ENOMEM;
        append_badrange_entry(badrange, bre_new, addr, length);
        return 0;
    }

    /*
     * There is a chance this is a duplicate, check for those first.
     * This will be the common case as ARS_STATUS returns all known
     * errors in the SPA space, and we can't query it per region
     */
    list_for_each_entry(bre, &badrange->list, list)
        if (bre->start == addr) {
            /* If length has changed, update this list entry */
            if (bre->length != length)
                bre->length = length;
            kfree(bre_new);
            return 0;
        }

    /*
     * If not a duplicate or a simple length update, add the entry as is,
     * as any overlapping ranges will get resolved when the list is consumed
     * and converted to badblocks
     */
    if (!bre_new)
        return -ENOMEM;
    append_badrange_entry(badrange, bre_new, addr, length);

    return 0;
}

int badrange_add(struct badrange *badrange, u64 addr, u64 length)
{
    int rc;

    spin_lock(&badrange->lock);
    rc = add_badrange(badrange, addr, length);
    spin_unlock(&badrange->lock);

    return rc;
}
EXPORT_SYMBOL_GPL(badrange_add);
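
/*
 * Usage sketch (illustrative, not part of the original file): after a
 * scrub such as ARS reports a poisoned SPA range, the bus provider
 * records it in the tracking list. The spa_addr/spa_len variables are
 * hypothetical.
 *
 *	rc = badrange_add(&nvdimm_bus->badrange, spa_addr, spa_len);
 *	if (rc)
 *		dev_err(&nvdimm_bus->dev, "failed to track badrange\n");
 */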

void badrange_forget(struct badrange *badrange, phys_addr_t start,
        unsigned int len)
{
    struct list_head *badrange_list = &badrange->list;
    u64 clr_end = start + len - 1;
    struct badrange_entry *bre, *next;

    spin_lock(&badrange->lock);

    /*
     * [start, clr_end] is the badrange interval being cleared.
     * [bre->start, bre_end] is the badrange_list entry we're comparing
     * the above interval against. The badrange list entry may need
     * to be modified (update either start or length), deleted, or
     * split into two based on the overlap characteristics
     */
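    /*
     * The four overlap cases handled below, with [s, e] the clear
     * interval and [S, E] a list entry (diagram added for clarity):
     *
     *   1) entry inside clear range:  s...S====E...e  -> delete entry
     *   2) head of entry cleared:     s...S====e---E  -> trim start
     *   3) tail of entry cleared:     S---s====E...e  -> trim length
     *   4) clear range inside entry:  S---s====e---E  -> split in two
     */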

    list_for_each_entry_safe(bre, next, badrange_list, list) {
        u64 bre_end = bre->start + bre->length - 1;

        /* Skip intervals with no intersection */
        if (bre_end < start)
            continue;
        if (bre->start > clr_end)
            continue;
        /* Delete completely overlapped badrange entries */
        if ((bre->start >= start) && (bre_end <= clr_end)) {
            list_del(&bre->list);
            kfree(bre);
            continue;
        }
        /* Adjust start point of partially cleared entries */
        if ((start <= bre->start) && (clr_end > bre->start)) {
            bre->length -= clr_end - bre->start + 1;
            bre->start = clr_end + 1;
            continue;
        }
        /* Adjust bre->length for partial clearing at the tail end */
        if ((bre->start < start) && (bre_end <= clr_end)) {
            /* bre->start remains the same */
            bre->length = start - bre->start;
            continue;
        }
        /*
         * If clearing in the middle of an entry, we split it into
         * two by modifying the current entry to represent one half of
         * the split, and adding a new entry for the second half.
         */
        if ((bre->start < start) && (bre_end > clr_end)) {
            u64 new_start = clr_end + 1;
            u64 new_len = bre_end - new_start + 1;

            /* Add new entry covering the right half */
            alloc_and_append_badrange_entry(badrange, new_start,
                    new_len, GFP_NOWAIT);
            /* Adjust this entry to cover the left half */
            bre->length = start - bre->start;
            continue;
        }
    }
    spin_unlock(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_forget);
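
/*
 * Usage sketch (illustrative, not part of the original file): once an
 * error range has been successfully cleared, e.g. via a clear-error
 * command, the provider drops it from the tracking list. The
 * cleared_start/cleared_len variables are hypothetical.
 *
 *	badrange_forget(&nvdimm_bus->badrange, cleared_start, cleared_len);
 */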

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
    dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
            (u64) s * 512, (u64) num * 512);
    /* this isn't an error as the hardware will still throw an exception */
    if (badblocks_set(bb, s, num, 1))
        dev_info_once(bb->dev, "%s: failed for sector %llx\n",
                __func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb:     badblocks instance to populate
 * @ns_offset:  namespace offset where the error range begins (in bytes)
 * @len:    number of bytes of badrange to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
    const unsigned int sector_size = 512;
    sector_t start_sector, end_sector;
    u64 num_sectors;
    u32 rem;

    start_sector = div_u64(ns_offset, sector_size);
    end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
    if (rem)
        end_sector++;
    num_sectors = end_sector - start_sector;

    if (unlikely(num_sectors > (u64)INT_MAX)) {
        u64 remaining = num_sectors;
        sector_t s = start_sector;

        while (remaining) {
            int done = min_t(u64, remaining, INT_MAX);

            set_badblock(bb, s, done);
            remaining -= done;
            s += done;
        }
    } else
        set_badblock(bb, start_sector, num_sectors);
}
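
/*
 * Worked example (added for clarity): ns_offset = 1000, len = 100.
 * start_sector = 1000 / 512 = 1; 1000 + 100 = 1100, and 1100 / 512 = 2
 * remainder 76, so end_sector rounds up to 3 and num_sectors = 2.
 * Sectors 1 and 2 (bytes 512..1535) are marked bad, fully covering the
 * byte range 1000..1099.
 */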

static void badblocks_populate(struct badrange *badrange,
        struct badblocks *bb, const struct range *range)
{
    struct badrange_entry *bre;

    if (list_empty(&badrange->list))
        return;

    list_for_each_entry(bre, &badrange->list, list) {
        u64 bre_end = bre->start + bre->length - 1;

        /* Discard intervals with no intersection */
        if (bre_end < range->start)
            continue;
        if (bre->start > range->end)
            continue;
        /* Deal with any overlap after start of the namespace */
        if (bre->start >= range->start) {
            u64 start = bre->start;
            u64 len;

            if (bre_end <= range->end)
                len = bre->length;
            else
                len = range->start + range_len(range)
                    - bre->start;
            __add_badblock_range(bb, start - range->start, len);
            continue;
        }
        /*
         * Deal with overlap for badrange starting before
         * the namespace.
         */
        if (bre->start < range->start) {
            u64 len;

            if (bre_end < range->end)
                len = bre->start + bre->length - range->start;
            else
                len = range_len(range);
            __add_badblock_range(bb, 0, len);
        }
    }
}

/**
 * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @range: resource range to consider
 *
 * The badrange list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges.  Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
        struct badblocks *bb, const struct range *range)
{
    struct nvdimm_bus *nvdimm_bus;

    if (!is_memory(&nd_region->dev)) {
        dev_WARN_ONCE(&nd_region->dev, 1,
                "%s only valid for pmem regions\n", __func__);
        return;
    }
    nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

    nvdimm_bus_lock(&nvdimm_bus->dev);
    badblocks_populate(&nvdimm_bus->badrange, bb, range);
    nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
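
/*
 * Usage sketch (illustrative, not part of the original file): a pmem
 * region driver translates the bus-wide badrange list into badblocks
 * for the namespace range it is bringing up. The "res" and "pmem"
 * variables below are hypothetical.
 *
 *	struct range range = {
 *		.start = res->start,
 *		.end = res->end,
 *	};
 *
 *	nvdimm_badblocks_populate(nd_region, &pmem->bb, &range);
 */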