// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

void badrange_init(struct badrange *badrange)
{
	INIT_LIST_HEAD(&badrange->list);
	spin_lock_init(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_init);
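
/*
 * Illustrative usage (a sketch, not part of this file): a bus provider
 * embeds a struct badrange in its bus descriptor and initializes it once
 * at registration time, before any poison ranges are reported:
 *
 *	badrange_init(&nvdimm_bus->badrange);
 *
 * All later badrange_add()/badrange_forget() calls then operate on this
 * list under badrange->lock.
 */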

static void append_badrange_entry(struct badrange *badrange,
		struct badrange_entry *bre, u64 addr, u64 length)
{
	lockdep_assert_held(&badrange->lock);
	bre->start = addr;
	bre->length = length;
	list_add_tail(&bre->list, &badrange->list);
}

static int alloc_and_append_badrange_entry(struct badrange *badrange,
		u64 addr, u64 length, gfp_t flags)
{
	struct badrange_entry *bre;

	bre = kzalloc(sizeof(*bre), flags);
	if (!bre)
		return -ENOMEM;

	append_badrange_entry(badrange, bre, addr, length);
	return 0;
}

static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
{
	struct badrange_entry *bre, *bre_new;

	/*
	 * Drop the lock so the allocation can sleep; the caller
	 * (badrange_add) holds badrange->lock around this call.
	 */
	spin_unlock(&badrange->lock);
	bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
	spin_lock(&badrange->lock);

	if (list_empty(&badrange->list)) {
		if (!bre_new)
			return -ENOMEM;
		append_badrange_entry(badrange, bre_new, addr, length);
		return 0;
	}

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region.
	 */
	list_for_each_entry(bre, &badrange->list, list)
		if (bre->start == addr) {
			/* If length has changed, update this list entry */
			if (bre->length != length)
				bre->length = length;
			kfree(bre_new);
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as
	 * is, as any overlapping ranges will get resolved when the list is
	 * consumed and converted to badblocks.
	 */
	if (!bre_new)
		return -ENOMEM;
	append_badrange_entry(badrange, bre_new, addr, length);

	return 0;
}

int badrange_add(struct badrange *badrange, u64 addr, u64 length)
{
	int rc;

	spin_lock(&badrange->lock);
	rc = add_badrange(badrange, addr, length);
	spin_unlock(&badrange->lock);

	return rc;
}
EXPORT_SYMBOL_GPL(badrange_add);
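
/*
 * Example (illustrative, not part of the original file): re-reporting a
 * range that starts at an already-listed address updates that entry in
 * place rather than growing the list, while a new start address appends:
 *
 *	badrange_add(&br, 0x1000, 0x200);  // list: one entry, 0x200 bytes
 *	badrange_add(&br, 0x1000, 0x400);  // same entry, length now 0x400
 *	badrange_add(&br, 0x3000, 0x100);  // second entry appended
 *
 * Overlapping entries with different start addresses are deliberately
 * kept separate; they are reconciled later by badblocks_populate().
 */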

void badrange_forget(struct badrange *badrange, phys_addr_t start,
		unsigned int len)
{
	struct list_head *badrange_list = &badrange->list;
	u64 clr_end = start + len - 1;
	struct badrange_entry *bre, *next;

	spin_lock(&badrange->lock);

	/*
	 * [start, clr_end] is the badrange interval being cleared.
	 * [bre->start, bre_end] is the badrange_list entry we're comparing.
	 * Four cases are possible for each entry: no intersection, the
	 * entry is completely covered, the entry is trimmed at its start
	 * or its end, or the cleared interval punches a hole in the middle
	 * of the entry, splitting it in two.
	 */
	list_for_each_entry_safe(bre, next, badrange_list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Skip intervals with no intersection */
		if (bre_end < start)
			continue;
		if (bre->start > clr_end)
			continue;
		/* Delete completely overlapped badrange entries */
		if ((bre->start >= start) && (bre_end <= clr_end)) {
			list_del(&bre->list);
			kfree(bre);
			continue;
		}
		/* Adjust start point of partially cleared entries */
		if ((start <= bre->start) && (clr_end > bre->start)) {
			bre->length -= clr_end - bre->start + 1;
			bre->start = clr_end + 1;
			continue;
		}
		/* Adjust bre->length for partial clearing at the end */
		if ((bre->start < start) && (bre_end <= clr_end)) {
			/* bre->start remains the same */
			bre->length = start - bre->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half
		 * of the split, and adding a new entry for the second half.
		 */
		if ((bre->start < start) && (bre_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = bre_end - new_start + 1;

			/* Add a new entry covering the right half */
			alloc_and_append_badrange_entry(badrange, new_start,
					new_len, GFP_NOWAIT);
			/* Adjust this entry to cover the left half */
			bre->length = start - bre->start;
			continue;
		}
	}
	spin_unlock(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_forget);
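
/*
 * Example (illustrative): clearing from the middle of an entry takes the
 * final branch above and splits it. With one entry covering
 * [0x1000, 0x3fff] (start 0x1000, length 0x3000):
 *
 *	badrange_forget(&br, 0x2000, 0x1000);	// clears [0x2000, 0x2fff]
 *
 * trims the original entry to [0x1000, 0x1fff] (length = start -
 * bre->start = 0x1000) and appends a new GFP_NOWAIT-allocated entry for
 * [0x3000, 0x3fff].
 */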

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb:		badblocks instance to populate
 * @ns_offset:	namespace offset where the error range begins (in bytes)
 * @len:	number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size).
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector, end_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
	if (rem)
		end_sector++;
	num_sectors = end_sector - start_sector;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		/* badblocks_set() takes an int count, so feed it in chunks */
		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}
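
/*
 * Example (illustrative): the conversion rounds outward so a partially
 * poisoned sector is marked bad in full. For ns_offset = 700 and
 * len = 520:
 *
 *	start_sector = 700 / 512 = 1			(rounds down)
 *	end_sector = DIV_ROUND_UP(700 + 520, 512) = 3	(rounds up)
 *	num_sectors = 3 - 1 = 2
 *
 * so sectors 1 and 2 (bytes 512-1535) are flagged for the poisoned
 * bytes 700-1219.
 */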

static void badblocks_populate(struct badrange *badrange,
		struct badblocks *bb, const struct range *range)
{
	struct badrange_entry *bre;

	if (list_empty(&badrange->list))
		return;

	list_for_each_entry(bre, &badrange->list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Discard intervals with no intersection */
		if (bre_end < range->start)
			continue;
		if (bre->start > range->end)
			continue;
		/* Deal with any overlap after start of the range */
		if (bre->start >= range->start) {
			u64 start = bre->start;
			u64 len;

			if (bre_end <= range->end)
				len = bre->length;
			else
				len = range->start + range_len(range)
					- bre->start;
			__add_badblock_range(bb, start - range->start, len);
			continue;
		}
		/*
		 * Deal with overlap for badranges starting before
		 * the range.
		 */
		if (bre->start < range->start) {
			u64 len;

			if (bre_end < range->end)
				len = bre->start + bre->length - range->start;
			else
				len = range_len(range);
			__add_badblock_range(bb, 0, len);
		}
	}
}
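
/*
 * Example (illustrative): an entry [0x0, 0x10fff] compared against a
 * region range [0x8000, 0xffff] takes the bre->start < range->start
 * branch; since bre_end also exceeds range->end, len = range_len(range)
 * and the whole region is marked bad starting at namespace offset 0.
 */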

/**
 * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @range: resource range to consider
 *
 * The badrange list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges. Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct range *range)
{
	struct nvdimm_bus *nvdimm_bus;

	if (!is_memory(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(&nvdimm_bus->badrange, bb, range);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
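
/*
 * Illustrative caller (a sketch, assuming a pmem namespace setup path
 * along the lines of drivers/nvdimm/claim.c): after resolving the
 * namespace's physical address range, seed its badblocks instance from
 * the bus-wide badrange list:
 *
 *	struct range range = {
 *		.start = nsio->res.start,
 *		.end = nsio->res.end,
 *	};
 *
 *	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), bb, &range);
 */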