0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #include <linux/kthread.h>
0027 #include <linux/pci.h>
0028 #include <linux/uaccess.h>
0029 #include <linux/pm_runtime.h>
0030
0031 #include "amdgpu.h"
0032 #include "amdgpu_pm.h"
0033 #include "amdgpu_dm_debugfs.h"
0034 #include "amdgpu_ras.h"
0035 #include "amdgpu_rap.h"
0036 #include "amdgpu_securedisplay.h"
0037 #include "amdgpu_fw_attestation.h"
0038 #include "amdgpu_umr.h"
0039
0040 #include "amdgpu_reset.h"
0041 #include "amdgpu_psp_ta.h"
0042
0043 #if defined(CONFIG_DEBUG_FS)
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
/**
 * amdgpu_debugfs_process_reg_op - common worker for the amdgpu_regs
 * debugfs read/write entry points (MMIO register access).
 *
 * @read: true for a register read, false for a register write
 * @f: open debugfs file; the amdgpu_device is stashed in i_private
 * @buf: userspace buffer (destination on read, source on write)
 * @size: byte count to transfer, must be a multiple of 4
 * @pos: file offset; the upper bits encode access options:
 *
 *   bit 62: GRBM banking - bits 33:24 = SE, 43:34 = SH, 53:44 = INSTANCE
 *           (a field value of 0x3FF means broadcast / "all")
 *   bit 61: SRBM banking - bits 33:24 = ME, 43:34 = PIPE, 53:44 = QUEUE,
 *           58:54 = VMID
 *   bit 23: take adev->pm.mutex (PG lock) around the access
 *   bits 22:0: byte offset of the register to access
 *
 * Bits 61 and 62 are mutually exclusive.
 *
 * Returns the number of bytes transferred or a negative error code.
 */
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	/* dword sized/aligned only; GRBM and SRBM banking are exclusive */
	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		/* 0x3FF in a field selects all banks (broadcast) */
		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {

		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	/* strip the option bits so only the register offset remains */
	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (use_bank) {
		/* reject out-of-range banks (0xFFFFFFFF = broadcast is fine) */
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	/* restore broadcast selection before dropping the index locks */
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
0188
0189
0190
0191
/*
 * amdgpu_debugfs_regs_read - read callback for the amdgpu_regs file;
 * forwards to the common MMIO worker in read mode.
 */
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}
0197
0198
0199
0200
/*
 * amdgpu_debugfs_regs_write - write callback for the amdgpu_regs file;
 * forwards to the common MMIO worker in write mode (the const cast is
 * safe because the worker only reads from @buf when writing registers).
 */
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
0206
0207 static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
0208 {
0209 struct amdgpu_debugfs_regs2_data *rd;
0210
0211 rd = kzalloc(sizeof *rd, GFP_KERNEL);
0212 if (!rd)
0213 return -ENOMEM;
0214 rd->adev = file_inode(file)->i_private;
0215 file->private_data = rd;
0216 mutex_init(&rd->lock);
0217
0218 return 0;
0219 }
0220
0221 static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
0222 {
0223 struct amdgpu_debugfs_regs2_data *rd = file->private_data;
0224 mutex_destroy(&rd->lock);
0225 kfree(file->private_data);
0226 return 0;
0227 }
0228
/*
 * amdgpu_debugfs_regs2_op - common worker for amdgpu_regs2 read/write.
 *
 * @f: open file; per-open state (device + banking selection) in private_data
 * @buf: userspace buffer (destination on read, source on write)
 * @offset: byte offset of the register to access (must be dword aligned)
 * @size: byte count, must be a multiple of 4
 * @write_en: nonzero for a register write, zero for a read
 *
 * Unlike the legacy amdgpu_regs worker, the GRBM/SRBM banking and PG
 * lock options come from rd->id (set via ioctl) instead of being
 * encoded in the file offset.  rd->lock serializes against concurrent
 * ioctl updates of that state.
 *
 * Returns the number of bytes transferred or a negative error code.
 */
static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
{
	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
	struct amdgpu_device *adev = rd->adev;
	ssize_t result = 0;
	int r;
	uint32_t value;

	if (size & 0x3 || offset & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	mutex_lock(&rd->lock);

	if (rd->id.use_grbm) {
		/* reject out-of-range banks (0xFFFFFFFF = broadcast is fine) */
		if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
		    (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			mutex_unlock(&rd->lock);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
						rd->id.grbm.sh,
						rd->id.grbm.instance);
	}

	if (rd->id.use_srbm) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
					    rd->id.srbm.queue, rd->id.srbm.vmid);
	}

	if (rd->id.pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		if (!write_en) {
			value = RREG32(offset >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}
		offset += 4;
		size -= 4;
		result += 4;
		buf += 4;
	}
end:
	/* restore broadcast selection before dropping the index locks */
	if (rd->id.use_grbm) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (rd->id.use_srbm) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (rd->id.pg_lock)
		mutex_unlock(&adev->pm.mutex);

	mutex_unlock(&rd->lock);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
0318
/*
 * amdgpu_debugfs_regs2_ioctl - ioctl handler for the amdgpu_regs2 file.
 *
 * AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE copies a struct
 * amdgpu_debugfs_regs2_iocdata from userspace into the per-open state
 * (under rd->lock), selecting GRBM/SRBM banking and the PG lock for
 * subsequent read()/write() calls.  Any other command is rejected.
 */
static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
{
	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
	int r;

	switch (cmd) {
	case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
		mutex_lock(&rd->lock);
		/* copy_from_user returns the number of bytes NOT copied */
		r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
		mutex_unlock(&rd->lock);
		return r ? -EINVAL : 0;
	default:
		return -EINVAL;
	}
	return 0;	/* unreachable; all switch arms return */
}
0335
/* read callback for amdgpu_regs2; *pos is the plain register byte offset */
static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
{
	return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
}
0340
/* write callback for amdgpu_regs2; const cast is safe, worker only reads @buf */
static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
{
	return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
}
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
0360 size_t size, loff_t *pos)
0361 {
0362 struct amdgpu_device *adev = file_inode(f)->i_private;
0363 ssize_t result = 0;
0364 int r;
0365
0366 if (size & 0x3 || *pos & 0x3)
0367 return -EINVAL;
0368
0369 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
0370 if (r < 0) {
0371 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0372 return r;
0373 }
0374
0375 r = amdgpu_virt_enable_access_debugfs(adev);
0376 if (r < 0) {
0377 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0378 return r;
0379 }
0380
0381 while (size) {
0382 uint32_t value;
0383
0384 value = RREG32_PCIE(*pos);
0385 r = put_user(value, (uint32_t *)buf);
0386 if (r)
0387 goto out;
0388
0389 result += 4;
0390 buf += 4;
0391 *pos += 4;
0392 size -= 4;
0393 }
0394
0395 r = result;
0396 out:
0397 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
0398 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0399 amdgpu_virt_disable_access_debugfs(adev);
0400 return r;
0401 }
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
0416 size_t size, loff_t *pos)
0417 {
0418 struct amdgpu_device *adev = file_inode(f)->i_private;
0419 ssize_t result = 0;
0420 int r;
0421
0422 if (size & 0x3 || *pos & 0x3)
0423 return -EINVAL;
0424
0425 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
0426 if (r < 0) {
0427 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0428 return r;
0429 }
0430
0431 r = amdgpu_virt_enable_access_debugfs(adev);
0432 if (r < 0) {
0433 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0434 return r;
0435 }
0436
0437 while (size) {
0438 uint32_t value;
0439
0440 r = get_user(value, (uint32_t *)buf);
0441 if (r)
0442 goto out;
0443
0444 WREG32_PCIE(*pos, value);
0445
0446 result += 4;
0447 buf += 4;
0448 *pos += 4;
0449 size -= 4;
0450 }
0451
0452 r = result;
0453 out:
0454 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
0455 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0456 amdgpu_virt_disable_access_debugfs(adev);
0457 return r;
0458 }
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
0473 size_t size, loff_t *pos)
0474 {
0475 struct amdgpu_device *adev = file_inode(f)->i_private;
0476 ssize_t result = 0;
0477 int r;
0478
0479 if (size & 0x3 || *pos & 0x3)
0480 return -EINVAL;
0481
0482 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
0483 if (r < 0) {
0484 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0485 return r;
0486 }
0487
0488 r = amdgpu_virt_enable_access_debugfs(adev);
0489 if (r < 0) {
0490 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0491 return r;
0492 }
0493
0494 while (size) {
0495 uint32_t value;
0496
0497 value = RREG32_DIDT(*pos >> 2);
0498 r = put_user(value, (uint32_t *)buf);
0499 if (r)
0500 goto out;
0501
0502 result += 4;
0503 buf += 4;
0504 *pos += 4;
0505 size -= 4;
0506 }
0507
0508 r = result;
0509 out:
0510 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
0511 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0512 amdgpu_virt_disable_access_debugfs(adev);
0513 return r;
0514 }
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
0529 size_t size, loff_t *pos)
0530 {
0531 struct amdgpu_device *adev = file_inode(f)->i_private;
0532 ssize_t result = 0;
0533 int r;
0534
0535 if (size & 0x3 || *pos & 0x3)
0536 return -EINVAL;
0537
0538 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
0539 if (r < 0) {
0540 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0541 return r;
0542 }
0543
0544 r = amdgpu_virt_enable_access_debugfs(adev);
0545 if (r < 0) {
0546 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0547 return r;
0548 }
0549
0550 while (size) {
0551 uint32_t value;
0552
0553 r = get_user(value, (uint32_t *)buf);
0554 if (r)
0555 goto out;
0556
0557 WREG32_DIDT(*pos >> 2, value);
0558
0559 result += 4;
0560 buf += 4;
0561 *pos += 4;
0562 size -= 4;
0563 }
0564
0565 r = result;
0566 out:
0567 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
0568 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0569 amdgpu_virt_disable_access_debugfs(adev);
0570 return r;
0571 }
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
0586 size_t size, loff_t *pos)
0587 {
0588 struct amdgpu_device *adev = file_inode(f)->i_private;
0589 ssize_t result = 0;
0590 int r;
0591
0592 if (size & 0x3 || *pos & 0x3)
0593 return -EINVAL;
0594
0595 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
0596 if (r < 0) {
0597 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0598 return r;
0599 }
0600
0601 r = amdgpu_virt_enable_access_debugfs(adev);
0602 if (r < 0) {
0603 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0604 return r;
0605 }
0606
0607 while (size) {
0608 uint32_t value;
0609
0610 value = RREG32_SMC(*pos);
0611 r = put_user(value, (uint32_t *)buf);
0612 if (r)
0613 goto out;
0614
0615 result += 4;
0616 buf += 4;
0617 *pos += 4;
0618 size -= 4;
0619 }
0620
0621 r = result;
0622 out:
0623 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
0624 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0625 amdgpu_virt_disable_access_debugfs(adev);
0626 return r;
0627 }
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
0642 size_t size, loff_t *pos)
0643 {
0644 struct amdgpu_device *adev = file_inode(f)->i_private;
0645 ssize_t result = 0;
0646 int r;
0647
0648 if (size & 0x3 || *pos & 0x3)
0649 return -EINVAL;
0650
0651 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
0652 if (r < 0) {
0653 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0654 return r;
0655 }
0656
0657 r = amdgpu_virt_enable_access_debugfs(adev);
0658 if (r < 0) {
0659 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0660 return r;
0661 }
0662
0663 while (size) {
0664 uint32_t value;
0665
0666 r = get_user(value, (uint32_t *)buf);
0667 if (r)
0668 goto out;
0669
0670 WREG32_SMC(*pos, value);
0671
0672 result += 4;
0673 buf += 4;
0674 *pos += 4;
0675 size -= 4;
0676 }
0677
0678 r = result;
0679 out:
0680 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
0681 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
0682 amdgpu_virt_disable_access_debugfs(adev);
0683 return r;
0684 }
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
/*
 * amdgpu_debugfs_gca_config_read - dump a snapshot of the GCA (gfx)
 * configuration as an array of dwords.
 *
 * The first dword is an interface version number (currently 5); the
 * entries that follow are appended in a fixed order that userspace
 * tools depend on, so new fields must only ever be added at the end
 * (bumping the version).
 *
 * Returns the number of bytes copied or a negative error code.
 */
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version 1: gfx config block */
	config[no_regs++] = 5;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev/pg/cg flags */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = lower_32_bits(adev->pg_flags);
	config[no_regs++] = lower_32_bits(adev->cg_flags);

	/* family/external rev */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* PCI identifiers */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	/* APU vs dGPU */
	config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;

	/* upper halves of the 64-bit pg/cg flag words */
	config[no_regs++] = upper_32_bits(adev->pg_flags);
	config[no_regs++] = upper_32_bits(adev->cg_flags);

	/* copy out, bounded by the number of entries actually filled in */
	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
0782
0783
0784
0785
0786
0787
0788
0789
0790
0791
0792
0793
0794
0795
/*
 * amdgpu_debugfs_sensor_read - read a power-management sensor value.
 *
 * The sensor index is derived from the file offset (*pos >> 2); the
 * sensor may report one or several dwords.  @valuesize is passed to
 * amdgpu_dpm_read_sensor() as the buffer capacity and comes back as
 * the number of bytes the sensor actually produced; a request larger
 * than that is rejected with -EINVAL.
 *
 * Requires dpm to be enabled.  Returns bytes copied or a negative
 * error code.
 */
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	/* cannot ask for more data than the sensor provided */
	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return !r ? outsize : r;
}
0855
0856
0857
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868
0869
0870
0871
0872
0873
0874
0875
0876
/*
 * amdgpu_debugfs_wave_read - dump wave status data for a selected wave.
 *
 * The wave to read is encoded in the file offset:
 *   bits 6:0   - byte offset into the returned data
 *   bits 14:7  - SE selector
 *   bits 22:15 - SH selector
 *   bits 30:23 - CU selector
 *   bits 36:31 - WAVE id
 *   bits 44:37 - SIMD id
 *
 * Returns bytes copied, -EINVAL if no wave data was produced, or
 * another negative error code.
 */
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	/* x is the number of dwords the hw callback filled in */
	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957
0958
0959
0960
0961
0962
0963
0964
0965
0966
0967
0968
0969 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
0970 size_t size, loff_t *pos)
0971 {
0972 struct amdgpu_device *adev = f->f_inode->i_private;
0973 int r;
0974 ssize_t result = 0;
0975 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
0976
0977 if (size > 4096 || size & 3 || *pos & 3)
0978 return -EINVAL;
0979
0980
0981 offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
0982 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
0983 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
0984 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
0985 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
0986 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
0987 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
0988 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
0989
0990 data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
0991 if (!data)
0992 return -ENOMEM;
0993
0994 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
0995 if (r < 0)
0996 goto err;
0997
0998 r = amdgpu_virt_enable_access_debugfs(adev);
0999 if (r < 0)
1000 goto err;
1001
1002
1003 mutex_lock(&adev->grbm_idx_mutex);
1004 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
1005
1006 if (bank == 0) {
1007 if (adev->gfx.funcs->read_wave_vgprs)
1008 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
1009 } else {
1010 if (adev->gfx.funcs->read_wave_sgprs)
1011 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
1012 }
1013
1014 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
1015 mutex_unlock(&adev->grbm_idx_mutex);
1016
1017 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1018 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1019
1020 while (size) {
1021 uint32_t value;
1022
1023 value = data[result >> 2];
1024 r = put_user(value, (uint32_t *)buf);
1025 if (r) {
1026 amdgpu_virt_disable_access_debugfs(adev);
1027 goto err;
1028 }
1029
1030 result += 4;
1031 buf += 4;
1032 size -= 4;
1033 }
1034
1035 kfree(data);
1036 amdgpu_virt_disable_access_debugfs(adev);
1037 return result;
1038
1039 err:
1040 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1041 kfree(data);
1042 return r;
1043 }
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055 static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
1056 size_t size, loff_t *pos)
1057 {
1058 struct amdgpu_device *adev = file_inode(f)->i_private;
1059 ssize_t result = 0;
1060 int r;
1061
1062 if (size & 0x3 || *pos & 0x3)
1063 return -EINVAL;
1064
1065 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1066 if (r < 0) {
1067 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1068 return r;
1069 }
1070
1071 while (size) {
1072 uint32_t value;
1073
1074 r = get_user(value, (uint32_t *)buf);
1075 if (r)
1076 goto out;
1077
1078 amdgpu_gfx_off_ctrl(adev, value ? true : false);
1079
1080 result += 4;
1081 buf += 4;
1082 *pos += 4;
1083 size -= 4;
1084 }
1085
1086 r = result;
1087 out:
1088 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1089 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1090
1091 return r;
1092 }
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103 static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
1104 size_t size, loff_t *pos)
1105 {
1106 struct amdgpu_device *adev = file_inode(f)->i_private;
1107 ssize_t result = 0;
1108 int r;
1109
1110 if (size & 0x3 || *pos & 0x3)
1111 return -EINVAL;
1112
1113 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1114 if (r < 0) {
1115 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1116 return r;
1117 }
1118
1119 while (size) {
1120 u32 value = adev->gfx.gfx_off_state;
1121
1122 r = put_user(value, (u32 *)buf);
1123 if (r)
1124 goto out;
1125
1126 result += 4;
1127 buf += 4;
1128 *pos += 4;
1129 size -= 4;
1130 }
1131
1132 r = result;
1133 out:
1134 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1135 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1136
1137 return r;
1138 }
1139
1140 static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf,
1141 size_t size, loff_t *pos)
1142 {
1143 struct amdgpu_device *adev = file_inode(f)->i_private;
1144 ssize_t result = 0;
1145 int r;
1146
1147 if (size & 0x3 || *pos & 0x3)
1148 return -EINVAL;
1149
1150 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1151 if (r < 0) {
1152 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1153 return r;
1154 }
1155
1156 while (size) {
1157 u32 value;
1158
1159 r = amdgpu_get_gfx_off_status(adev, &value);
1160 if (r)
1161 goto out;
1162
1163 r = put_user(value, (u32 *)buf);
1164 if (r)
1165 goto out;
1166
1167 result += 4;
1168 buf += 4;
1169 *pos += 4;
1170 size -= 4;
1171 }
1172
1173 r = result;
1174 out:
1175 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1176 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1177
1178 return r;
1179 }
1180
/* File operations for the register/debug debugfs files.  All use
 * default_llseek so userspace can encode register offsets (and, for
 * some files, banking options) in the file position. */
static const struct file_operations amdgpu_debugfs_regs2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
	.read = amdgpu_debugfs_regs2_read,
	.write = amdgpu_debugfs_regs2_write,
	.open = amdgpu_debugfs_regs2_open,
	.release = amdgpu_debugfs_regs2_release,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

/* read-only files below */
static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_read,
	.write = amdgpu_debugfs_gfxoff_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_status_read,
	.llseek = default_llseek
};
1251
/* Table of debugfs fops; index-matched with debugfs_regs_names below -
 * the two arrays must be kept in sync. */
static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs2_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
	&amdgpu_debugfs_gfxoff_status_fops,
};

/* debugfs file names for the fops above, in the same order */
static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs2",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
	"amdgpu_gfxoff_status",
};
1279
1280
1281
1282
1283
1284
1285
/**
 * amdgpu_debugfs_regs_init - Create the register-access debugfs files
 *
 * @adev: the amdgpu device
 *
 * Creates one debugfs file per entry in debugfs_regs[] under the DRM
 * primary minor's debugfs root.  The first entry (amdgpu_regs) gets its
 * inode size set to the MMIO aperture size so userspace can discover
 * how much register space is addressable.  Always returns 0.
 */
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (!i && !IS_ERR_OR_NULL(ent))
			i_size_write(ent->d_inode, adev->rmmio_size);
	}

	return 0;
}
1302
1303 static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
1304 {
1305 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
1306 struct drm_device *dev = adev_to_drm(adev);
1307 int r = 0, i;
1308
1309 r = pm_runtime_get_sync(dev->dev);
1310 if (r < 0) {
1311 pm_runtime_put_autosuspend(dev->dev);
1312 return r;
1313 }
1314
1315
1316 r = down_write_killable(&adev->reset_domain->sem);
1317 if (r)
1318 return r;
1319
1320
1321 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1322 struct amdgpu_ring *ring = adev->rings[i];
1323
1324 if (!ring || !ring->sched.thread)
1325 continue;
1326 kthread_park(ring->sched.thread);
1327 }
1328
1329 seq_printf(m, "run ib test:\n");
1330 r = amdgpu_ib_ring_tests(adev);
1331 if (r)
1332 seq_printf(m, "ib ring tests failed (%d).\n", r);
1333 else
1334 seq_printf(m, "ib ring tests passed.\n");
1335
1336
1337 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1338 struct amdgpu_ring *ring = adev->rings[i];
1339
1340 if (!ring || !ring->sched.thread)
1341 continue;
1342 kthread_unpark(ring->sched.thread);
1343 }
1344
1345 up_write(&adev->reset_domain->sem);
1346
1347 pm_runtime_mark_last_busy(dev->dev);
1348 pm_runtime_put_autosuspend(dev->dev);
1349
1350 return 0;
1351 }
1352
1353 static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
1354 {
1355 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1356 struct drm_device *dev = adev_to_drm(adev);
1357 int r;
1358
1359 r = pm_runtime_get_sync(dev->dev);
1360 if (r < 0) {
1361 pm_runtime_put_autosuspend(dev->dev);
1362 return r;
1363 }
1364
1365 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
1366
1367 pm_runtime_mark_last_busy(dev->dev);
1368 pm_runtime_put_autosuspend(dev->dev);
1369
1370 return 0;
1371 }
1372
1373
1374 static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
1375 {
1376 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1377 struct drm_device *dev = adev_to_drm(adev);
1378 int r;
1379
1380 r = pm_runtime_get_sync(dev->dev);
1381 if (r < 0) {
1382 pm_runtime_put_autosuspend(dev->dev);
1383 return r;
1384 }
1385
1386 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
1387
1388 pm_runtime_mark_last_busy(dev->dev);
1389 pm_runtime_put_autosuspend(dev->dev);
1390
1391 return 0;
1392 }
1393
1394 static int amdgpu_debugfs_benchmark(void *data, u64 val)
1395 {
1396 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1397 struct drm_device *dev = adev_to_drm(adev);
1398 int r;
1399
1400 r = pm_runtime_get_sync(dev->dev);
1401 if (r < 0) {
1402 pm_runtime_put_autosuspend(dev->dev);
1403 return r;
1404 }
1405
1406 r = amdgpu_benchmark(adev, val);
1407
1408 pm_runtime_mark_last_busy(dev->dev);
1409 pm_runtime_put_autosuspend(dev->dev);
1410
1411 return r;
1412 }
1413
1414 static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
1415 {
1416 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
1417 struct drm_device *dev = adev_to_drm(adev);
1418 struct drm_file *file;
1419 int r;
1420
1421 r = mutex_lock_interruptible(&dev->filelist_mutex);
1422 if (r)
1423 return r;
1424
1425 list_for_each_entry(file, &dev->filelist, lhead) {
1426 struct amdgpu_fpriv *fpriv = file->driver_priv;
1427 struct amdgpu_vm *vm = &fpriv->vm;
1428
1429 seq_printf(m, "pid:%d\tProcess:%s ----------\n",
1430 vm->task_info.pid, vm->task_info.process_name);
1431 r = amdgpu_bo_reserve(vm->root.bo, true);
1432 if (r)
1433 break;
1434 amdgpu_debugfs_vm_bo_info(vm, m);
1435 amdgpu_bo_unreserve(vm->root.bo);
1436 }
1437
1438 mutex_unlock(&dev->filelist_mutex);
1439
1440 return r;
1441 }
1442
/* seq_file show() wrappers and simple-attribute fops for the debugfs files
 * registered in amdgpu_debugfs_init(): the evict files only read a value,
 * the benchmark file only accepts a written value. */
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
			 NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
			 NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark,
			 "%lld\n");
1451
/**
 * amdgpu_ib_preempt_fences_swap - detach still-pending fences from a ring
 * @ring: ring whose fence driver is scanned
 * @fences: caller-provided array of num_fences_mask + 1 entries; detached
 *          fences are stored here, indexed by their fence slot
 *
 * Walks the fence-driver slots from the last signaled seqno up to and
 * including the last emitted seqno (sync_seq), clears each slot in the
 * fence driver and hands the fence (with its reference) to @fences so the
 * caller can signal and release it after preemption handling.
 *
 * NOTE(review): uses rcu_dereference_protected(*ptr, 1), i.e. claims
 * exclusive access without a lock — presumably safe because the caller has
 * parked the ring's scheduler thread; confirm against the call site.
 */
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	/* Reduce both seqnos to their slot index in drv->fences[]. */
	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		/* Advance to the next slot, wrapping at the mask. */
		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* Detach the fence from the driver's slot. */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}
1481
/**
 * amdgpu_ib_preempt_signal_fences - signal and release swapped-out fences
 * @fences: array filled by amdgpu_ib_preempt_fences_swap(); may contain
 *          NULL entries for empty slots
 * @length: number of entries in @fences
 *
 * Signals every non-NULL fence in the array and drops the reference that
 * was transferred when the fence was detached from the fence driver.
 */
static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int idx;

	for (idx = 0; idx < length; idx++) {
		struct dma_fence *f = fences[idx];

		if (!f)
			continue;

		dma_fence_signal(f);
		dma_fence_put(f);
	}
}
1496
/**
 * amdgpu_ib_preempt_job_recovery - re-submit all pending scheduler jobs
 * @sched: GPU scheduler whose pending list is replayed
 *
 * After an IB preemption kicked work off the ring, push every job still on
 * the scheduler's pending list back to the hardware via ops->run_job() and
 * drop the fence reference run_job() returned (the scheduler keeps its own
 * bookkeeping for the job itself). The pending list is walked under
 * job_list_lock so the scheduler cannot modify it concurrently.
 */
static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->pending_list, list) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}
1509
/**
 * amdgpu_ib_preempt_mark_partial_job - flag the job interrupted mid-IB
 * @ring: ring that was just preempted (only GFX rings are handled)
 *
 * Reads the seqno the firmware recorded at preemption time (two dwords
 * into the fence driver's CPU-visible page — NOTE(review): offset presumed
 * from firmware layout, confirm against the GFX fence ABI), and if it is
 * ahead of the last signaled seqno, looks up the fence of the partially
 * executed job. Then walks the scheduler's pending list: fully signaled
 * jobs are freed, and the job whose hw_fence matches the preempted fence
 * is marked AMDGPU_IB_PREEMPTED so resubmission can handle it specially.
 *
 * Note: @fence is only assigned on the preemption path; the final compare
 * is guarded by short-circuit on @preempted, so the no_preempt path never
 * reads it.
 */
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	/* Seqno recorded by the firmware at the point of preemption. */
	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		/* Nothing was interrupted; only reap finished jobs below. */
		preempted = false;
		goto no_preempt;
	}

	/* Map the seqno to its fence slot and fetch the partial job's fence. */
	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring and free it up */
			list_del_init(&s_job->list);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && (&job->hw_fence) == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}
1549
/**
 * amdgpu_debugfs_ib_preempt - debugfs trigger to preempt a ring's IB
 * @data: the amdgpu_device the debugfs file was created for
 * @val: ring index to preempt (must be < AMDGPU_MAX_RINGS)
 *
 * Parks the target ring's scheduler thread, asks the hardware to preempt
 * the current IB, and if a preemption actually happened: marks the
 * interrupted job, detaches and force-completes the pending fences,
 * replays the pending jobs, and finally signals the swapped-out fences.
 * Holds the reset-domain semaphore (read) so a GPU reset cannot race.
 *
 * Returns: 0 on success, -EINVAL for a bad ring, -EBUSY if a previous
 * preemption is still outstanding, -ENOMEM, or a negative errno from the
 * lock / preempt calls.
 */
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, resched, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
		return -EINVAL;

	/* the last preemption failed to complete; don't stack another */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	/* one slot per fence-driver entry */
	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_domain->sem);
	if (r)
		goto pro_end;

	/* stop the scheduler */
	kthread_park(ring->sched.thread);

	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	/* seqnos differ => some emitted work did not finish => preempted */
	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for jobs finished */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

	up_read(&adev->reset_domain->sem);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

pro_end:
	kfree(fences);

	return r;
}
1627
1628 static int amdgpu_debugfs_sclk_set(void *data, u64 val)
1629 {
1630 int ret = 0;
1631 uint32_t max_freq, min_freq;
1632 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1633
1634 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1635 return -EINVAL;
1636
1637 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1638 if (ret < 0) {
1639 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1640 return ret;
1641 }
1642
1643 ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
1644 if (ret == -EOPNOTSUPP) {
1645 ret = 0;
1646 goto out;
1647 }
1648 if (ret || val > max_freq || val < min_freq) {
1649 ret = -EINVAL;
1650 goto out;
1651 }
1652
1653 ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
1654 if (ret)
1655 ret = -EINVAL;
1656
1657 out:
1658 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1659 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1660
1661 return ret;
1662 }
1663
/* Write-only attributes: writing a ring index triggers an IB preemption,
 * writing a frequency forces a fixed SCLK. */
DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
			 amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
			 amdgpu_debugfs_sclk_set, "%llu\n");
1669
1670 static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
1671 char __user *buf, size_t size, loff_t *pos)
1672 {
1673 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
1674 char reg_offset[12];
1675 int i, ret, len = 0;
1676
1677 if (*pos)
1678 return 0;
1679
1680 memset(reg_offset, 0, 12);
1681 ret = down_read_killable(&adev->reset_domain->sem);
1682 if (ret)
1683 return ret;
1684
1685 for (i = 0; i < adev->num_regs; i++) {
1686 sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
1687 up_read(&adev->reset_domain->sem);
1688 if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
1689 return -EFAULT;
1690
1691 len += strlen(reg_offset);
1692 ret = down_read_killable(&adev->reset_domain->sem);
1693 if (ret)
1694 return ret;
1695 }
1696
1697 up_read(&adev->reset_domain->sem);
1698 *pos += len;
1699
1700 return len;
1701 }
1702
/**
 * amdgpu_reset_dump_register_list_write - replace the reset-dump register list
 * @f: debugfs file
 * @buf: user buffer holding whitespace-separated hex register offsets
 * @size: number of bytes in @buf
 * @pos: file position (ignored; each write replaces the whole list)
 *
 * Parses hex offsets from @buf one at a time, growing @tmp with
 * krealloc_array per entry, then allocates a matching value array and
 * swaps both into the device under the reset-domain write lock. The old
 * arrays end up in @tmp/@new after the swap and are freed on exit.
 *
 * Note on the cleanup: after a successful sscanf, @tmp and @new alias the
 * same buffer, hence the "tmp != new" guard avoids a double free on the
 * parse-error paths; on success they hold the two distinct old arrays.
 *
 * Returns: @size on success, or a negative errno.
 */
static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
			const char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	char reg_offset[11];
	uint32_t *new = NULL, *tmp = NULL;
	int ret, i = 0, len = 0;

	do {
		memset(reg_offset, 0, 11);
		/* Pull at most 10 chars (one hex token) of the remaining input. */
		if (copy_from_user(reg_offset, buf + len,
				   min(10, ((int)size-len)))) {
			ret = -EFAULT;
			goto error_free;
		}

		/* Grow the offset array by one slot for this entry. */
		new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto error_free;
		}
		tmp = new;
		/* %n reports how many input chars were consumed. */
		if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
			ret = -EINVAL;
			goto error_free;
		}

		len += ret;
		i++;
	} while (len < size);

	/* Value array sized to match; filled in at reset-dump time. */
	new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto error_free;
	}
	ret = down_write_killable(&adev->reset_domain->sem);
	if (ret)
		goto error_free;

	/* Publish the new arrays; the old ones fall into tmp/new for freeing. */
	swap(adev->reset_dump_reg_list, tmp);
	swap(adev->reset_dump_reg_value, new);
	adev->num_regs = i;
	up_write(&adev->reset_domain->sem);
	ret = size;

error_free:
	if (tmp != new)
		kfree(tmp);
	kfree(new);
	return ret;
}
1755
/* fops for the amdgpu_reset_dump_register_list debugfs file: read dumps
 * the current register list, write replaces it wholesale. */
static const struct file_operations amdgpu_reset_dump_register_list = {
	.owner = THIS_MODULE,
	.read = amdgpu_reset_dump_register_list_read,
	.write = amdgpu_reset_dump_register_list_write,
	.llseek = default_llseek
};
1762
1763 int amdgpu_debugfs_init(struct amdgpu_device *adev)
1764 {
1765 struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
1766 struct dentry *ent;
1767 int r, i;
1768
1769 if (!debugfs_initialized())
1770 return 0;
1771
1772 debugfs_create_x32("amdgpu_smu_debug", 0600, root,
1773 &adev->pm.smu_debug_mask);
1774
1775 ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
1776 &fops_ib_preempt);
1777 if (IS_ERR(ent)) {
1778 DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
1779 return PTR_ERR(ent);
1780 }
1781
1782 ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
1783 &fops_sclk_set);
1784 if (IS_ERR(ent)) {
1785 DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
1786 return PTR_ERR(ent);
1787 }
1788
1789
1790 amdgpu_ttm_debugfs_init(adev);
1791 amdgpu_debugfs_pm_init(adev);
1792 amdgpu_debugfs_sa_init(adev);
1793 amdgpu_debugfs_fence_init(adev);
1794 amdgpu_debugfs_gem_init(adev);
1795
1796 r = amdgpu_debugfs_regs_init(adev);
1797 if (r)
1798 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1799
1800 amdgpu_debugfs_firmware_init(adev);
1801 amdgpu_ta_if_debugfs_init(adev);
1802
1803 #if defined(CONFIG_DRM_AMD_DC)
1804 if (amdgpu_device_has_dc_support(adev))
1805 dtn_debugfs_init(adev);
1806 #endif
1807
1808 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1809 struct amdgpu_ring *ring = adev->rings[i];
1810
1811 if (!ring)
1812 continue;
1813
1814 amdgpu_debugfs_ring_init(adev, ring);
1815 }
1816
1817 for ( i = 0; i < adev->vcn.num_vcn_inst; i++) {
1818 if (!amdgpu_vcnfw_log)
1819 break;
1820
1821 if (adev->vcn.harvest_config & (1 << i))
1822 continue;
1823
1824 amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]);
1825 }
1826
1827 amdgpu_ras_debugfs_create_all(adev);
1828 amdgpu_rap_debugfs_init(adev);
1829 amdgpu_securedisplay_debugfs_init(adev);
1830 amdgpu_fw_attestation_debugfs_init(adev);
1831
1832 debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
1833 &amdgpu_evict_vram_fops);
1834 debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
1835 &amdgpu_evict_gtt_fops);
1836 debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
1837 &amdgpu_debugfs_test_ib_fops);
1838 debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
1839 &amdgpu_debugfs_vm_info_fops);
1840 debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
1841 &amdgpu_benchmark_fops);
1842 debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
1843 &amdgpu_reset_dump_register_list);
1844
1845 adev->debugfs_vbios_blob.data = adev->bios;
1846 adev->debugfs_vbios_blob.size = adev->bios_size;
1847 debugfs_create_blob("amdgpu_vbios", 0444, root,
1848 &adev->debugfs_vbios_blob);
1849
1850 adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
1851 adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
1852 debugfs_create_blob("amdgpu_discovery", 0444, root,
1853 &adev->debugfs_discovery_blob);
1854
1855 return 0;
1856 }
1857
1858 #else
/* Stub when debugfs is compiled out: nothing to register. */
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
/* Stub when debugfs is compiled out: nothing to register. */
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
1867 #endif