#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

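/*
 * MCA STATUST0 (error status) register addresses in SMN space for the
 * MP0, MP1 and MPIO blocks.
 */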
#define smnMCMP0_STATUST0	0x03830408
#define smnMCMP1_STATUST0	0x03b30408
#define smnMCMPIO_STATUST0	0x0c930408

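/* Query the RAS error count from the MP0 MCA status register. */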
static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
					       void *ras_error_status)
{
	amdgpu_mca_query_ras_error_count(adev,
					 smnMCMP0_STATUST0,
					 ras_error_status);
}

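/*
 * Match callback shared by the MP0/MP1/MPIO RAS blocks: a block object
 * matches only when both its block type and its sub-block index equal
 * the requested ones.
 */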
static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj,
				    enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	if (!block_obj)
		return -EINVAL;

	if ((block_obj->ras_comm.block == block) &&
	    (block_obj->ras_comm.sub_block_index == sub_block_index)) {
		return 0;
	}

	return -EINVAL;
}

const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
	.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
	.query_ras_error_address = NULL,
};

struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = {
	.ras_block = {
		.ras_comm = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
			.name = "mp0",
		},
		.hw_ops = &mca_v3_0_mp0_hw_ops,
		.ras_block_match = mca_v3_0_ras_block_match,
	},
};

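/* Query the RAS error count from the MP1 MCA status register. */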
static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
					       void *ras_error_status)
{
	amdgpu_mca_query_ras_error_count(adev,
					 smnMCMP1_STATUST0,
					 ras_error_status);
}

const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
	.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
	.query_ras_error_address = NULL,
};

struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = {
	.ras_block = {
		.ras_comm = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
			.name = "mp1",
		},
		.hw_ops = &mca_v3_0_mp1_hw_ops,
		.ras_block_match = mca_v3_0_ras_block_match,
	},
};

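/* Query the RAS error count from the MPIO MCA status register. */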
static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
						void *ras_error_status)
{
	amdgpu_mca_query_ras_error_count(adev,
					 smnMCMPIO_STATUST0,
					 ras_error_status);
}

const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
	.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
	.query_ras_error_address = NULL,
};

struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = {
	.ras_block = {
		.ras_comm = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
			.name = "mpio",
		},
		.hw_ops = &mca_v3_0_mpio_hw_ops,
		.ras_block_match = mca_v3_0_ras_block_match,
	},
};

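/*
 * Hook the MP0/MP1/MPIO RAS block descriptors into adev->mca, register
 * each one with the RAS framework and cache their common interface
 * (ras_comm) pointers.
 */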
static void mca_v3_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;

	mca->mp0.ras = &mca_v3_0_mp0_ras;
	mca->mp1.ras = &mca_v3_0_mp1_ras;
	mca->mpio.ras = &mca_v3_0_mpio_ras;
	amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block);
	amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block);
	amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block);
	mca->mp0.ras_if = &mca->mp0.ras->ras_block.ras_comm;
	mca->mp1.ras_if = &mca->mp1.ras->ras_block.ras_comm;
	mca->mpio.ras_if = &mca->mpio.ras->ras_block.ras_comm;
}

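/* MCA v3.0 entry points; currently only the init hook is provided. */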
const struct amdgpu_mca_funcs mca_v3_0_funcs = {
	.init = mca_v3_0_init,
};