// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 *  Copyright (C) 2006 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/interrupt.h>

/**
 * stack_trace_print - Print the entries in the stack trace
 * @entries:	Pointer to storage array
 * @nr_entries:	Number of entries in the storage array
 * @spaces:	Number of leading spaces to print
 */
void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,
		       int spaces)
{
	unsigned int i;

	if (WARN_ON(!entries))
		return;

	for (i = 0; i < nr_entries; i++)
		printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
}
EXPORT_SYMBOL_GPL(stack_trace_print);
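
/*
 * Illustrative sketch (not part of this file's API): capturing and printing
 * the current call chain. The helper name and the depth of 16 are arbitrary
 * choices for the example; stack_trace_save() is defined further down.
 *
 *	static void example_dump_stack(void)
 *	{
 *		unsigned long entries[16];
 *		unsigned int nr;
 *
 *		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *		stack_trace_print(entries, nr, 0);
 *	}
 */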

/**
 * stack_trace_snprint - Print the entries in the stack trace into a buffer
 * @buf:	Pointer to the print buffer
 * @size:	Size of the print buffer
 * @entries:	Pointer to storage array
 * @nr_entries:	Number of entries in the storage array
 * @spaces:	Number of leading spaces to print
 *
 * Return: Number of bytes printed.
 */
int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
			unsigned int nr_entries, int spaces)
{
	unsigned int generated, i, total = 0;

	if (WARN_ON(!entries))
		return 0;

	for (i = 0; i < nr_entries && size; i++) {
		generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
				     (void *)entries[i]);

		total += generated;
		if (generated >= size) {
			buf += size;
			size = 0;
		} else {
			buf += generated;
			size -= generated;
		}
	}

	return total;
}
EXPORT_SYMBOL_GPL(stack_trace_snprint);
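
/*
 * Illustrative sketch (not part of this file): formatting a captured trace
 * into a caller-supplied buffer, e.g. for a debugfs show routine. The
 * helper name and the trace depth are arbitrary for the example.
 *
 *	static int example_format_stack(char *buf, size_t len)
 *	{
 *		unsigned long entries[16];
 *		unsigned int nr;
 *
 *		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *		return stack_trace_snprint(buf, len, entries, nr, 0);
 *	}
 */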

#ifdef CONFIG_ARCH_STACKWALK

/*
 * Cookie handed to the stack_trace_consume_entry*() callbacks by the
 * stack_trace_save*() wrappers below: ->store/->size describe the output
 * array, ->skip is the number of leading entries to drop and ->len counts
 * the entries stored so far.
 */
struct stacktrace_cookie {
	unsigned long	*store;
	unsigned int	size;
	unsigned int	skip;
	unsigned int	len;
};

static bool stack_trace_consume_entry(void *cookie, unsigned long addr)
{
	struct stacktrace_cookie *c = cookie;

	if (c->len >= c->size)
		return false;

	if (c->skip > 0) {
		c->skip--;
		return true;
	}
	c->store[c->len++] = addr;
	return c->len < c->size;
}

/* Like stack_trace_consume_entry(), but does not store scheduler internals */
static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr)
{
	if (in_sched_functions(addr))
		return true;
	return stack_trace_consume_entry(cookie, addr);
}
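
/*
 * Illustrative sketch (not a real backend, not part of this file): with
 * CONFIG_ARCH_STACKWALK the architecture provides arch_stack_walk(), which
 * is expected to invoke consume_entry() for every return address until the
 * callback returns false or the walk ends. The unwind_*() helpers below are
 * hypothetical stand-ins for an architecture's unwinder:
 *
 *	void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 *			     struct task_struct *task, struct pt_regs *regs)
 *	{
 *		struct unwind_state state;
 *
 *		for (unwind_start(&state, task, regs); !unwind_done(&state);
 *		     unwind_next(&state)) {
 *			if (!consume_entry(cookie, unwind_get_pc(&state)))
 *				break;
 *		}
 *	}
 */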

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
			      unsigned int skipnr)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
		.skip	= skipnr + 1,
	};

	arch_stack_walk(consume_entry, &c, current, NULL);
	return c.len;
}
EXPORT_SYMBOL_GPL(stack_trace_save);
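
/*
 * Illustrative sketch (not part of this file): recording the current call
 * chain at allocation time so it can be reported later, a pattern similar
 * to what debug facilities such as page_owner use. The struct and field
 * names are invented for the example.
 *
 *	struct alloc_record {
 *		unsigned long	trace[8];
 *		unsigned int	trace_len;
 *	};
 *
 *	static void example_record_alloc(struct alloc_record *rec)
 *	{
 *		// skipnr == 1 drops the example_record_alloc() frame itself
 *		rec->trace_len = stack_trace_save(rec->trace,
 *						  ARRAY_SIZE(rec->trace), 1);
 *	}
 */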

/**
 * stack_trace_save_tsk - Save a task stack trace into a storage array
 * @tsk:	Pointer to the task to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
				  unsigned int size, unsigned int skipnr)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
		/* skip this function if they are tracing us */
		.skip	= skipnr + (current == tsk),
	};

	if (!try_get_task_stack(tsk))
		return 0;

	arch_stack_walk(consume_entry, &c, tsk, NULL);
	put_task_stack(tsk);
	return c.len;
}
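
/*
 * Illustrative sketch (not part of this file): dumping the stack of another
 * task, e.g. from a debug interface. The helper name is arbitrary; the
 * trace comes back empty if the target's stack has already been freed.
 *
 *	static void example_dump_task(struct task_struct *tsk)
 *	{
 *		unsigned long entries[16];
 *		unsigned int nr;
 *
 *		nr = stack_trace_save_tsk(tsk, entries, ARRAY_SIZE(entries), 0);
 *		stack_trace_print(entries, nr, 0);
 *	}
 */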

/**
 * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
 * @regs:	Pointer to pt_regs to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
				   unsigned int size, unsigned int skipnr)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
		.skip	= skipnr,
	};

	arch_stack_walk(consume_entry, &c, current, regs);
	return c.len;
}
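
/*
 * Illustrative sketch (not part of this file): capturing the call chain at
 * the point described by a pt_regs snapshot, as an exception handler or a
 * profiling hook might do. The helper name is arbitrary.
 *
 *	static unsigned int example_trace_from_regs(struct pt_regs *regs,
 *						    unsigned long *buf,
 *						    unsigned int max)
 *	{
 *		if (!regs)
 *			return 0;
 *		return stack_trace_save_regs(regs, buf, max, 0);
 *	}
 */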

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
 * stack_trace_save_tsk_reliable - Save task stack with verification
 * @tsk:	Pointer to the task to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 *
 * Return:	An error if it detects any unreliable features of the
 *		stack. Otherwise, number of entries
 */
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
				  unsigned int size)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
	};
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
	put_task_stack(tsk);
	return ret ? ret : c.len;
}
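
/*
 * Illustrative sketch (not part of this file): a consistency check of the
 * kind live patching performs, treating any error from the reliable walker
 * as "cannot decide". The function name and trace depth are invented for
 * the example.
 *
 *	static bool example_task_stack_is_decidable(struct task_struct *tsk)
 *	{
 *		unsigned long entries[64];
 *		int ret;
 *
 *		ret = stack_trace_save_tsk_reliable(tsk, entries,
 *						    ARRAY_SIZE(entries));
 *		return ret >= 0;
 *	}
 */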
#endif

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
 * stack_trace_save_user - Save a user space stack trace into a storage array
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
	};

	/* Trace user stack if not a kernel thread */
	if (current->flags & PF_KTHREAD)
		return 0;

	arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));

	return c.len;
}
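
/*
 * Illustrative sketch (not part of this file): capturing the user space call
 * chain of the current task, e.g. for a sampling profiler. The raw user
 * addresses would normally be handed to user space for symbolization rather
 * than printed with kernel symbol helpers. The helper name is arbitrary.
 *
 *	static unsigned int example_sample_user(unsigned long *buf,
 *						unsigned int max)
 *	{
 *		return stack_trace_save_user(buf, max);
 *	}
 */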
#endif

#else /* CONFIG_ARCH_STACKWALK */

/*
 * Architectures that do not implement save_stack_trace_*()
 * get these weak aliases and once-per-bootup warnings
 * (whenever this facility is utilized - for example by procfs):
 */
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}

__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
			      unsigned int skipnr)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
		.skip		= skipnr + 1,
	};

	save_stack_trace(&trace);
	return trace.nr_entries;
}
EXPORT_SYMBOL_GPL(stack_trace_save);

/**
 * stack_trace_save_tsk - Save a task stack trace into a storage array
 * @task:	The task to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_tsk(struct task_struct *task,
				  unsigned long *store, unsigned int size,
				  unsigned int skipnr)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
		/* skip this function if they are tracing us */
		.skip		= skipnr + (current == task),
	};

	save_stack_trace_tsk(task, &trace);
	return trace.nr_entries;
}

/**
 * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
 * @regs:	Pointer to pt_regs to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
				   unsigned int size, unsigned int skipnr)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
		.skip		= skipnr,
	};

	save_stack_trace_regs(regs, &trace);
	return trace.nr_entries;
}

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
 * stack_trace_save_tsk_reliable - Save task stack with verification
 * @tsk:	Pointer to the task to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 *
 * Return:	An error if it detects any unreliable features of the
 *		stack. Otherwise, number of entries
 */
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
				  unsigned int size)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
	};
	int ret = save_stack_trace_tsk_reliable(tsk, &trace);

	return ret ? ret : trace.nr_entries;
}
#endif

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
 * stack_trace_save_user - Save a user space stack trace into a storage array
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
	};

	save_stack_trace_user(&trace);
	return trace.nr_entries;
}
#endif

#endif /* !CONFIG_ARCH_STACKWALK */

static inline bool in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

/**
 * filter_irq_stacks - Find first IRQ stack entry in trace
 * @entries:	Pointer to stack trace array
 * @nr_entries:	Number of entries in the storage array
 *
 * Return: Number of trace entries until interrupt stack starts.
 */
unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
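
/*
 * Illustrative sketch (not part of this file): trimming a trace captured in
 * interrupt context so the frames of the randomly interrupted task below
 * the IRQ entry function are dropped, a pattern used by stack depot callers
 * to keep traces deduplicable. The helper name is invented for the example.
 *
 *	static unsigned int example_save_trimmed(unsigned long *entries,
 *						 unsigned int max)
 *	{
 *		unsigned int nr = stack_trace_save(entries, max, 0);
 *
 *		return filter_irq_stacks(entries, nr);
 *	}
 */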