/*
 * i915_gpu_error.c - capture and report GPU error state (hangs, faults)
 * for post-mortem analysis via /sys/class/drm/cardN/error.
 */
#include <linux/ascii85.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

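/*
 * Error capture can run from out-of-memory and atomic contexts, so
 * allocations must never block on direct reclaim, trigger the OOM killer,
 * or warn on failure: capture degrades gracefully when memory is tight.
 */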
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

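/*
 * The formatted error state is accumulated into a chain of page-sized
 * scatterlist entries. Note that sg->dma_address is repurposed to record
 * each chunk's byte offset into the overall stream, which lets readers
 * seek without rescanning from the start.
 */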
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocation with a reserve to fail cleanly */
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p;

		p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}

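/*
 * When CONFIG_DRM_I915_COMPRESS_ERROR is set, captured buffer-object pages
 * are deflated with zlib as they are copied; otherwise they are stored raw.
 * Either way, the page pool above provides a small reserve so that capture
 * can still make progress from atomic context.
 */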
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK:
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default:
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, "  GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, "  NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, "  EXCC: 0x%08x\n", ee->excc);
		err_printf(m, "  CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, "  CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, "  CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, "  DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, "  DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

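/*
 * Emit a vma's contents ascii85-encoded, one page at a time. The marker
 * preceding the stream records how the pages were stored: ':' for
 * zlib-deflated, '~' for raw (see err_compression_marker() above).
 */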
void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_cs *engine,
			       const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print_static(&error->device_info, &p);
	intel_device_info_print_runtime(&error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc_log);
}

static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}

static void err_print_gt_display(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
}

static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}

static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}

static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
}

static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (ee->guc_capture_node)
			intel_guc_capture_print_engine_node(m, ee);
		else
			error_print_engine(m, ee);

		err_printf(m, "  hung: %u\n", ee->hung);
		err_printf(m, "  engine reset count: %u\n", ee->reset_count);
		error_print_context(m, "  Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	intel_dmc_print_error_state(m, m->i915);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->is_guc_capture)
			print_guc_capture = true;

		err_print_gt_display(m, error->gt);
		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * GuC dumped global, eng-class and eng-instance registers
		 * together here as part of the engine state dump.
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

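/*
 * Copy up to @rem bytes of the rendered error state into @buf, starting at
 * stream offset @off. The scatterlist entry that satisfied the previous
 * read is cached in error->fit so sequential reads do not rescan the chain.
 */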
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.path);
	kfree(uc->huc_fw.path);
	i915_vma_coredump_free(uc->guc_log);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

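/*
 * Copy a vma's backing pages into the coredump. Depending on where the
 * pages live, they are read back through the GGTT's reserved error-capture
 * PTE, through the local-memory iomap, or by clflushing and kmapping
 * system pages; either way compress_page() stores (and optionally
 * deflates) the copy.
 */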
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 I915_CACHE_NONE, 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     I915_CACHE_NONE, 0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > mem->io_size) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap(page);
			ret = compress_page(compress, s, dst, false);
			kunmap(page);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	if (GRAPHICS_VER(i915) >= 11) {
		ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
		ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
		ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
		ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
		ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
		ee->nopid = ENGINE_READ(engine, RING_NOPID);
		ee->excc = ENGINE_READ(engine, RING_EXCC);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

static bool record_context(struct i915_gem_context_coredump *e,
			   const struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);

	e->total_runtime = intel_context_get_total_runtime_ns(rq->context);
	e->avg_runtime = intel_context_get_avg_runtime_ns(rq->context);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

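/*
 * An intel_engine_capture_vma is a lightweight reference to a vma resource,
 * taken while the request is still locked down; the actual page copies
 * happen later, in intel_engine_coredump_add_vma(), once we may sleep.
 */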
struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_resource *vma_res;
	char name[16];
	bool lockdep_cookie;
};

static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_resource *vma_res,
		     gfp_t gfp, const char *name)
{
	struct intel_engine_capture_vma *c;

	if (!vma_res)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma_res = i915_vma_resource_get(vma_res);

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
		return next;

	next = capture_vma_snapshot(next, vma->resource, gfp, name);

	return next;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_res, gfp,
					       "user");

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

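/*
 * Convenience wrapper: hold the vma resource just long enough to copy its
 * contents, so the backing store cannot be unbound mid-capture.
 */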
static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret = NULL;
	struct i915_vma_resource *vma_res;
	bool lockdep_cookie;

	if (!vma)
		return NULL;

	vma_res = vma->resource;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		ret = i915_vma_coredump_create(gt, vma_res, compress, name);
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}

	return ret;
}

static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}

struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
		engine_record_registers(ee);
		engine_record_execlists(ee);
	}

	return ee;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, rq);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
	vma = capture_user(vma, rq, gfp);
	vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
	vma = capture_vma(vma, rq->context->state, "HW context", gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_resource *vma_res = this->vma_res;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt, vma_res,
						 compress, this->name));

		i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
		i915_vma_resource_put(vma_res);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}

static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress,
	       u32 dump_flags)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct intel_context *ce;
	struct i915_request *rq = NULL;
	unsigned long flags;

	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
	if (!ee)
		return NULL;

	ce = intel_engine_get_hung_context(engine);
	if (ce) {
		intel_engine_clear_hung_context(engine);
		rq = intel_context_find_active_request(ce);
		if (!rq || !i915_request_started(rq))
			goto no_request_capture;
	} else {
		/*
		 * Getting here with GuC enabled means it is a forced error
		 * capture with no actual hang, so there is no need to
		 * attempt the execlist search.
		 */
		if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
			spin_lock_irqsave(&engine->sched_engine->lock, flags);
			rq = intel_engine_execlist_find_hung_request(engine);
			spin_unlock_irqrestore(&engine->sched_engine->lock,
					       flags);
		}
	}
	if (rq)
		rq = i915_request_get_rcu(rq);

	if (!rq)
		goto no_request_capture;

	capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
	if (!capture) {
		i915_request_put(rq);
		goto no_request_capture;
	}
	if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
		intel_guc_capture_get_matching_node(engine->gt, ee, ce);

	intel_engine_coredump_add_vma(ee, capture, compress);
	i915_request_put(rq);

	return ee;

no_request_capture:
	kfree(ee);
	return NULL;
}

static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress,
		  u32 dump_flags)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress, dump_flags);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
				intel_guc_capture_free_node(ee);
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}

static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	/*
	 * Non-default firmware paths will be specified by the modparam.
	 * As modparams are generally accessible from userspace, make
	 * explicit copies of the firmware paths.
	 */
	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
	error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
						"GuC log buffer", compress);

	return error_uc;
}

/* Capture display registers. */
static void gt_record_display_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;

	if (GRAPHICS_VER(i915) >= 6)
		gt->derrmr = intel_uncore_read(uncore, DERRMR);

	if (GRAPHICS_VER(i915) >= 8)
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
	else if (IS_VALLEYVIEW(i915))
		gt->ier = intel_uncore_read(uncore, VLV_IER);
	else if (HAS_PCH_SPLIT(i915))
		gt->ier = intel_uncore_read(uncore, DEIER);
	else if (GRAPHICS_VER(i915) == 2)
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	else
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
}

/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (GRAPHICS_VER(i915) >= 11) {
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (GRAPHICS_VER(i915) >= 8) {
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	}

	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}

/*
 * Capture all registers that relate to workload submission.
 * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us.
 */
static void gt_record_global_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization:
	 * 1. Registers specific to a single generation.
	 * 2. Registers which belong to multiple generations.
	 * 3. Feature specific registers.
	 * 4. Everything else.
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915))
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);

	if (GRAPHICS_VER(i915) == 7)
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (GRAPHICS_VER(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (GRAPHICS_VER(i915) == 6) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (GRAPHICS_VER(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (GRAPHICS_VER(i915) >= 6) {
		if (GRAPHICS_VER(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GRAPHICS_VER(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GRAPHICS_VER(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (GRAPHICS_VER(i915) == 12)
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

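	/* 4: Everything else */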
	if (GRAPHICS_VER(i915) >= 12) {
		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}
}

static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
}

/*
 * Generate a semi-unique error code. The code is not meant to have meaning,
 * its only purpose is to differentiate new and old hangs.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung". However, it has some very
	 * common synchronization commands which almost always appear in the
	 * case of strictly a client bug. Use instdone to differentiate those
	 * some.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}

static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	unsigned int hung_classes = 0;
	struct intel_gt_coredump *gt;
	int len;

	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		for (cs = gt->engine; cs; cs = cs->next) {
			if (cs->hung) {
				hung_classes |= BIT(cs->engine->uabi_class);
				if (!first)
					first = cs;
			}
		}
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			GRAPHICS_VER(error->i915), hung_classes,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}

static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = i915_vtd_active(i915);
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	error->driver_caps = i915->caps;
}

struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	struct i915_gpu_coredump *error;

	if (!i915->params.error_capture)
		return NULL;

	error = kzalloc(sizeof(*error), gfp);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
	error->capture = jiffies;

	capture_gen(error);

	return error;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
	struct intel_gt_coredump *gc;

	gc = kzalloc(sizeof(*gc), gfp);
	if (!gc)
		return NULL;

	gc->_gt = gt;
	gc->awake = intel_gt_pm_is_awake(gt);

	gt_record_display_regs(gc);
	gt_record_global_nonguc_regs(gc);

	/*
	 * GuC dumps global, eng-class and eng-instance registers together
	 * (as part of the engine state dump) before an engine is reset due
	 * to a hung context. Thus, if GuC triggered the context reset, we
	 * retrieve those registers as part of gt_record_engines instead.
	 */
	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
		gt_record_global_regs(gc);

	gt_record_fences(gc);

	return gc;
}

struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	struct i915_vma_compress *compress;

	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
	if (!compress)
		return NULL;

	if (!compress_init(compress)) {
		kfree(compress);
		return NULL;
	}

	return compress;
}

void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}

static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		if (INTEL_INFO(i915)->has_gt_uc) {
			error->gt->uc = gt_record_uc(error->gt, compress);
			if (error->gt->uc) {
				if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
					error->gt->uc->is_guc_capture = true;
				else
					GEM_BUG_ON(error->gt->uc->is_guc_capture);
			}
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, engine_mask, compress, dump_flags);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);

	return error;
}

struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	static DEFINE_MUTEX(capture_mutex);
	int ret = mutex_lock_interruptible(&capture_mutex);
	struct i915_gpu_coredump *dump;

	if (ret)
		return ERR_PTR(ret);

	dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
	mutex_unlock(&capture_mutex);

	return dump;
}

void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 * @dump_flags: dump flags
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fires
 * a uevent so userspace knows something bad happened.
 */
void i915_capture_error_state(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(gt, engine_mask, dump_flags);
	if (IS_ERR(error)) {
		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}

struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV))
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}

void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}