/*
 * Helpers for building the memory range lists (struct crash_mem) used by
 * the powerpc kexec/kdump code.
 */

#define pr_fmt(fmt) "kexec ranges: " fmt

#include <linux/sort.h>
#include <linux/kexec.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include <asm/kexec_ranges.h>

/**
 * get_max_nr_ranges - Number of ranges a crash_mem structure of the
 *                     given allocation size can hold.
 * @size:              Allocation size of the crash_mem structure.
 *
 * Returns the maximum number of ranges.
 */
static inline unsigned int get_max_nr_ranges(size_t size)
{
	return ((size - sizeof(struct crash_mem)) /
		sizeof(struct crash_mem_range));
}

/**
 * get_mem_rngs_size - Allocated size of @mem_rngs, derived from its
 *                     max_nr_ranges and the chunk size.
 * @mem_rngs:          Memory ranges.
 *
 * Returns the allocation size of @mem_rngs, 0 if it is NULL.
 */
static inline size_t get_mem_rngs_size(struct crash_mem *mem_rngs)
{
	size_t size;

	if (!mem_rngs)
		return 0;

	size = (sizeof(struct crash_mem) +
		(mem_rngs->max_nr_ranges * sizeof(struct crash_mem_range)));

	/*
	 * Memory is allocated in multiples of MEM_RANGE_CHUNK_SZ, so
	 * align up to get the size that was actually allocated.
	 */
	return ALIGN(size, MEM_RANGE_CHUNK_SZ);
}

/**
 * __add_mem_range - Add a memory range to the ranges list.
 * @mem_ranges:        Range list to add the memory range to.
 * @base:              Base address of the range to add.
 * @size:              Size of the memory range to add.
 *
 * (Re)allocates memory, if needed.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
{
	struct crash_mem *mem_rngs = *mem_ranges;

	if (!mem_rngs || (mem_rngs->nr_ranges == mem_rngs->max_nr_ranges)) {
		mem_rngs = realloc_mem_ranges(mem_ranges);
		if (!mem_rngs)
			return -ENOMEM;
	}

	mem_rngs->ranges[mem_rngs->nr_ranges].start = base;
	mem_rngs->ranges[mem_rngs->nr_ranges].end = base + size - 1;
	pr_debug("Added memory range [%#016llx - %#016llx] at index %d\n",
		 base, base + size - 1, mem_rngs->nr_ranges);
	mem_rngs->nr_ranges++;
	return 0;
}

/**
 * __merge_memory_ranges - Merge overlapping and adjacent memory ranges.
 * @mem_rngs:          Range list to merge.
 *
 * Assumes the list is already sorted by start address.
 */
static void __merge_memory_ranges(struct crash_mem *mem_rngs)
{
	struct crash_mem_range *ranges;
	int i, idx;

	if (!mem_rngs)
		return;

	idx = 0;
	ranges = &(mem_rngs->ranges[0]);
	for (i = 1; i < mem_rngs->nr_ranges; i++) {
		if (ranges[i].start <= (ranges[i-1].end + 1))
			ranges[idx].end = ranges[i].end;
		else {
			idx++;
			if (i == idx)
				continue;

			ranges[idx] = ranges[i];
		}
	}
	mem_rngs->nr_ranges = idx + 1;
}
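
/*
 * Worked example (illustrative only, values made up): given the sorted list
 *   [0x0000 - 0x0fff], [0x1000 - 0x3fff], [0x6000 - 0x7fff]
 * __merge_memory_ranges() folds the first two entries, which are adjacent
 * (0x1000 == 0x0fff + 1), into a single [0x0000 - 0x3fff] range and leaves
 * the third entry untouched, so nr_ranges drops from 3 to 2.
 */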

/* cmp_func_t callback for sort(): order ranges by start address */
static int rngcmp(const void *_x, const void *_y)
{
	const struct crash_mem_range *x = _x, *y = _y;

	if (x->start > y->start)
		return 1;
	if (x->start < y->start)
		return -1;
	return 0;
}

/**
 * sort_memory_ranges - Sort the given memory ranges list by start address.
 * @mem_rngs:          Range list to sort.
 * @merge:             If true, merge the list after sorting.
 */
void sort_memory_ranges(struct crash_mem *mem_rngs, bool merge)
{
	int i;

	if (!mem_rngs)
		return;

	/* Sort the ranges in-place */
	sort(&(mem_rngs->ranges[0]), mem_rngs->nr_ranges,
	     sizeof(mem_rngs->ranges[0]), rngcmp, NULL);

	if (merge)
		__merge_memory_ranges(mem_rngs);

	/* Dump the resulting list for debugging */
	pr_debug("Memory ranges:\n");
	for (i = 0; i < mem_rngs->nr_ranges; i++) {
		pr_debug("\t[%03d][%#016llx - %#016llx]\n", i,
			 mem_rngs->ranges[i].start,
			 mem_rngs->ranges[i].end);
	}
}

/**
 * realloc_mem_ranges - Reallocate @mem_ranges with its size grown by
 *                      MEM_RANGE_CHUNK_SZ. Frees the old buffer and
 *                      clears the caller's pointer if allocation fails.
 * @mem_ranges:         Memory ranges to reallocate.
 *
 * Returns pointer to the reallocated structure on success, NULL otherwise.
 */
struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges)
{
	struct crash_mem *mem_rngs = *mem_ranges;
	unsigned int nr_ranges;
	size_t size;

	size = get_mem_rngs_size(mem_rngs);
	nr_ranges = mem_rngs ? mem_rngs->nr_ranges : 0;

	size += MEM_RANGE_CHUNK_SZ;
	mem_rngs = krealloc(*mem_ranges, size, GFP_KERNEL);
	if (!mem_rngs) {
		kfree(*mem_ranges);
		*mem_ranges = NULL;
		return NULL;
	}

	mem_rngs->nr_ranges = nr_ranges;
	mem_rngs->max_nr_ranges = get_max_nr_ranges(size);
	*mem_ranges = mem_rngs;

	return mem_rngs;
}
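
/*
 * Note on ownership (a minimal caller sketch, not code from this file):
 * on failure realloc_mem_ranges() has already freed the old buffer and
 * set *mem_ranges to NULL, so the caller must not kfree() it again:
 *
 *	if (!realloc_mem_ranges(&mem_ranges))
 *		return -ENOMEM;	// old list is already gone
 */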

/**
 * add_mem_range - Update an existing memory range if the new range
 *                 overlaps it, else append a new memory range.
 * @mem_ranges:    Range list to add the memory range to.
 * @base:          Base address of the range to add.
 * @size:          Size of the memory range to add.
 *
 * (Re)allocates memory, if needed.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
{
	struct crash_mem *mem_rngs = *mem_ranges;
	u64 mstart, mend, end;
	unsigned int i;

	if (!size)
		return 0;

	end = base + size - 1;

	if (!mem_rngs || !(mem_rngs->nr_ranges))
		return __add_mem_range(mem_ranges, base, size);

	for (i = 0; i < mem_rngs->nr_ranges; i++) {
		mstart = mem_rngs->ranges[i].start;
		mend = mem_rngs->ranges[i].end;
		if (base < mend && end > mstart) {
			if (base < mstart)
				mem_rngs->ranges[i].start = base;
			if (end > mend)
				mem_rngs->ranges[i].end = end;
			return 0;
		}
	}

	return __add_mem_range(mem_ranges, base, size);
}
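
/*
 * Typical usage (a hypothetical sketch, not a caller in this file; base and
 * size stand in for whatever region is being described): start from a NULL
 * list, let add_mem_range() allocate on first use, then sort and merge
 * before handing the list on:
 *
 *	struct crash_mem *mem_ranges = NULL;
 *	int ret;
 *
 *	ret = add_mem_range(&mem_ranges, base, size);
 *	if (ret)
 *		return ret;
 *	sort_memory_ranges(mem_ranges, true);
 *	...
 *	kfree(mem_ranges);
 */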

/**
 * add_tce_mem_ranges - Add the TCE table ranges of PCI nodes to the list.
 * @mem_ranges:         Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_tce_mem_ranges(struct crash_mem **mem_ranges)
{
	struct device_node *dn = NULL;
	int ret = 0;

	for_each_node_by_type(dn, "pci") {
		u64 base;
		u32 size;

		ret = of_property_read_u64(dn, "linux,tce-base", &base);
		ret |= of_property_read_u32(dn, "linux,tce-size", &size);
		if (ret) {
			/*
			 * PCI nodes without a TCE table are fine, so
			 * ignore "property does not exist" errors.
			 */
			if (ret == -EINVAL) {
				ret = 0;
				continue;
			}
			break;
		}

		ret = add_mem_range(mem_ranges, base, size);
		if (ret)
			break;
	}

	of_node_put(dn);
	return ret;
}

/**
 * add_initrd_mem_range - Add the initrd range to the list, if the initrd
 *                        is being retained.
 * @mem_ranges:           Range list to add the memory range to.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_initrd_mem_range(struct crash_mem **mem_ranges)
{
	u64 base, end;
	int ret;

	/* This range matters only if the initrd is retained across the kexec */
	if (!strstr(saved_command_line, "retain_initrd"))
		return 0;

	ret = of_property_read_u64(of_chosen, "linux,initrd-start", &base);
	ret |= of_property_read_u64(of_chosen, "linux,initrd-end", &end);
	if (!ret)
		ret = add_mem_range(mem_ranges, base, end - base + 1);

	return ret;
}

#ifdef CONFIG_PPC_64S_HASH_MMU
/**
 * add_htab_mem_range - Add the hash page table region to the list, if a
 *                      hash table is allocated in memory.
 * @mem_ranges:         Range list to add the memory range to.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_htab_mem_range(struct crash_mem **mem_ranges)
{
	if (!htab_address)
		return 0;

	return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes);
}
#endif

/**
 * add_kernel_mem_range - Add the memory occupied by the kernel
 *                        (0 through __pa(_end)) to the list.
 * @mem_ranges:          Range list to add the memory range to.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_kernel_mem_range(struct crash_mem **mem_ranges)
{
	return add_mem_range(mem_ranges, 0, __pa(_end));
}

/**
 * add_rtas_mem_range - Add the RTAS region to the list, if present.
 * @mem_ranges:         Range list to add the memory range to.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_rtas_mem_range(struct crash_mem **mem_ranges)
{
	struct device_node *dn;
	u32 base, size;
	int ret = 0;

	dn = of_find_node_by_path("/rtas");
	if (!dn)
		return 0;

	ret = of_property_read_u32(dn, "linux,rtas-base", &base);
	ret |= of_property_read_u32(dn, "rtas-size", &size);
	if (!ret)
		ret = add_mem_range(mem_ranges, base, size);

	of_node_put(dn);
	return ret;
}

/**
 * add_opal_mem_range - Add the OPAL firmware region to the list, if present.
 * @mem_ranges:         Range list to add the memory range to.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_opal_mem_range(struct crash_mem **mem_ranges)
{
	struct device_node *dn;
	u64 base, size;
	int ret;

	dn = of_find_node_by_path("/ibm,opal");
	if (!dn)
		return 0;

	ret = of_property_read_u64(dn, "opal-base-address", &base);
	ret |= of_property_read_u64(dn, "opal-runtime-size", &size);
	if (!ret)
		ret = add_mem_range(mem_ranges, base, size);

	of_node_put(dn);
	return ret;
}

/**
 * add_reserved_mem_ranges - Add the "reserved-ranges" regions exported by
 *                           firmware in the device tree root node to the list.
 * @mem_ranges:              Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
{
	int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0;
	const __be32 *prop;

	prop = of_get_property(of_root, "reserved-ranges", &len);
	if (!prop)
		return 0;

	n_mem_addr_cells = of_n_addr_cells(of_root);
	n_mem_size_cells = of_n_size_cells(of_root);
	cells = n_mem_addr_cells + n_mem_size_cells;

	/* Each reserved range is an (address, size) pair */
	for (i = 0; i < (len / (sizeof(u32) * cells)); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * cells), n_mem_addr_cells);
		size = of_read_number(prop + (i * cells) + n_mem_addr_cells,
				      n_mem_size_cells);

		ret = add_mem_range(mem_ranges, base, size);
		if (ret)
			break;
	}

	return ret;
}
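
/*
 * A hypothetical end-to-end sketch of how these helpers compose (the name
 * build_firmware_ranges() is made up; the real callers live elsewhere in
 * the powerpc kexec code, not in this file):
 *
 *	static int build_firmware_ranges(struct crash_mem **mem_ranges)
 *	{
 *		int ret;
 *
 *		ret = add_rtas_mem_range(mem_ranges);
 *		if (ret)
 *			goto out;
 *		ret = add_opal_mem_range(mem_ranges);
 *		if (ret)
 *			goto out;
 *		ret = add_reserved_mem_ranges(mem_ranges);
 *	out:
 *		if (!ret)
 *			sort_memory_ranges(*mem_ranges, true);
 *		return ret;
 *	}
 */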