Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2018 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 
0024 #include <linux/firmware.h>
0025 
0026 #include "amdgpu.h"
0027 #include "amdgpu_discovery.h"
0028 #include "soc15_hw_ip.h"
0029 #include "discovery.h"
0030 
0031 #include "soc15.h"
0032 #include "gfx_v9_0.h"
0033 #include "gmc_v9_0.h"
0034 #include "df_v1_7.h"
0035 #include "df_v3_6.h"
0036 #include "nbio_v6_1.h"
0037 #include "nbio_v7_0.h"
0038 #include "nbio_v7_4.h"
0039 #include "hdp_v4_0.h"
0040 #include "vega10_ih.h"
0041 #include "vega20_ih.h"
0042 #include "sdma_v4_0.h"
0043 #include "uvd_v7_0.h"
0044 #include "vce_v4_0.h"
0045 #include "vcn_v1_0.h"
0046 #include "vcn_v2_5.h"
0047 #include "jpeg_v2_5.h"
0048 #include "smuio_v9_0.h"
0049 #include "gmc_v10_0.h"
0050 #include "gmc_v11_0.h"
0051 #include "gfxhub_v2_0.h"
0052 #include "mmhub_v2_0.h"
0053 #include "nbio_v2_3.h"
0054 #include "nbio_v4_3.h"
0055 #include "nbio_v7_2.h"
0056 #include "nbio_v7_7.h"
0057 #include "hdp_v5_0.h"
0058 #include "hdp_v5_2.h"
0059 #include "hdp_v6_0.h"
0060 #include "nv.h"
0061 #include "soc21.h"
0062 #include "navi10_ih.h"
0063 #include "ih_v6_0.h"
0064 #include "gfx_v10_0.h"
0065 #include "gfx_v11_0.h"
0066 #include "sdma_v5_0.h"
0067 #include "sdma_v5_2.h"
0068 #include "sdma_v6_0.h"
0069 #include "lsdma_v6_0.h"
0070 #include "vcn_v2_0.h"
0071 #include "jpeg_v2_0.h"
0072 #include "vcn_v3_0.h"
0073 #include "jpeg_v3_0.h"
0074 #include "vcn_v4_0.h"
0075 #include "jpeg_v4_0.h"
0076 #include "amdgpu_vkms.h"
0077 #include "mes_v10_1.h"
0078 #include "mes_v11_0.h"
0079 #include "smuio_v11_0.h"
0080 #include "smuio_v11_0_6.h"
0081 #include "smuio_v13_0.h"
0082 #include "smuio_v13_0_6.h"
0083 
0084 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
0085 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
0086 
0087 #define mmRCC_CONFIG_MEMSIZE    0xde3
0088 #define mmMM_INDEX      0x0
0089 #define mmMM_INDEX_HI       0x6
0090 #define mmMM_DATA       0x1
0091 
/* Human-readable names for discovery-blob hardware IDs, indexed by HW_ID.
 * Used by amdgpu_discovery_sysfs_ips() to create named sysfs links; a NULL
 * entry simply means no link is created for that HW ID.
 */
static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[LSDMA_HWID]		= "LSDMA",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
};
0170 
/* Translation from the driver's internal HWIP index space to the HW IDs
 * found in the discovery blob.  Note that both NBIO_HWIP and NBIF_HWIP
 * deliberately map to the same NBIF_HWID.
 */
static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]	= SDMA2_HWID,
	[SDMA3_HWIP]	= SDMA3_HWID,
	[LSDMA_HWIP]	= LSDMA_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
	[PCIE_HWIP]	= PCIE_HWID,
};
0199 
0200 static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
0201 {
0202     uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
0203     uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
0204 
0205     amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
0206                   adev->mman.discovery_tmr_size, false);
0207     return 0;
0208 }
0209 
0210 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
0211 {
0212     const struct firmware *fw;
0213     const char *fw_name;
0214     int r;
0215 
0216     switch (amdgpu_discovery) {
0217     case 2:
0218         fw_name = FIRMWARE_IP_DISCOVERY;
0219         break;
0220     default:
0221         dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
0222         return -EINVAL;
0223     }
0224 
0225     r = request_firmware(&fw, fw_name, adev->dev);
0226     if (r) {
0227         dev_err(adev->dev, "can't load firmware \"%s\"\n",
0228             fw_name);
0229         return r;
0230     }
0231 
0232     memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
0233     release_firmware(fw);
0234 
0235     return 0;
0236 }
0237 
/* Compute the simple additive checksum used by the discovery binary:
 * the sum of all @size bytes of @data, truncated to 16 bits.
 */
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	uint32_t i;	/* was int: avoid signed/unsigned comparison with @size */

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}
0248 
/* Recompute the byte-wise checksum over @data and compare it against the
 * @expected value stored in the blob.
 */
static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return amdgpu_discovery_calculate_checksum(data, size) == expected;
}
0254 
0255 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
0256 {
0257     struct binary_header *bhdr;
0258     bhdr = (struct binary_header *)binary;
0259 
0260     return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
0261 }
0262 
0263 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
0264 {
0265     /*
0266      * So far, apply this quirk only on those Navy Flounder boards which
0267      * have a bad harvest table of VCN config.
0268      */
0269     if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
0270         (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
0271         switch (adev->pdev->revision) {
0272         case 0xC1:
0273         case 0xC2:
0274         case 0xC3:
0275         case 0xC5:
0276         case 0xC7:
0277         case 0xCF:
0278         case 0xDF:
0279             adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
0280             break;
0281         default:
0282             break;
0283         }
0284     }
0285 }
0286 
0287 static int amdgpu_discovery_init(struct amdgpu_device *adev)
0288 {
0289     struct table_info *info;
0290     struct binary_header *bhdr;
0291     uint16_t offset;
0292     uint16_t size;
0293     uint16_t checksum;
0294     int r;
0295 
0296     adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
0297     adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
0298     if (!adev->mman.discovery_bin)
0299         return -ENOMEM;
0300 
0301     r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
0302     if (r) {
0303         dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
0304         r = -EINVAL;
0305         goto out;
0306     }
0307 
0308     if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
0309         dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
0310         /* retry read ip discovery binary from file */
0311         r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
0312         if (r) {
0313             dev_err(adev->dev, "failed to read ip discovery binary from file\n");
0314             r = -EINVAL;
0315             goto out;
0316         }
0317         /* check the ip discovery binary signature */
0318         if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
0319             dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n");
0320             r = -EINVAL;
0321             goto out;
0322         }
0323     }
0324 
0325     bhdr = (struct binary_header *)adev->mman.discovery_bin;
0326 
0327     offset = offsetof(struct binary_header, binary_checksum) +
0328         sizeof(bhdr->binary_checksum);
0329     size = le16_to_cpu(bhdr->binary_size) - offset;
0330     checksum = le16_to_cpu(bhdr->binary_checksum);
0331 
0332     if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
0333                           size, checksum)) {
0334         dev_err(adev->dev, "invalid ip discovery binary checksum\n");
0335         r = -EINVAL;
0336         goto out;
0337     }
0338 
0339     info = &bhdr->table_list[IP_DISCOVERY];
0340     offset = le16_to_cpu(info->offset);
0341     checksum = le16_to_cpu(info->checksum);
0342 
0343     if (offset) {
0344         struct ip_discovery_header *ihdr =
0345             (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
0346         if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
0347             dev_err(adev->dev, "invalid ip discovery data table signature\n");
0348             r = -EINVAL;
0349             goto out;
0350         }
0351 
0352         if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
0353                               le16_to_cpu(ihdr->size), checksum)) {
0354             dev_err(adev->dev, "invalid ip discovery data table checksum\n");
0355             r = -EINVAL;
0356             goto out;
0357         }
0358     }
0359 
0360     info = &bhdr->table_list[GC];
0361     offset = le16_to_cpu(info->offset);
0362     checksum = le16_to_cpu(info->checksum);
0363 
0364     if (offset) {
0365         struct gpu_info_header *ghdr =
0366             (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
0367 
0368         if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
0369             dev_err(adev->dev, "invalid ip discovery gc table id\n");
0370             r = -EINVAL;
0371             goto out;
0372         }
0373 
0374         if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
0375                               le32_to_cpu(ghdr->size), checksum)) {
0376             dev_err(adev->dev, "invalid gc data table checksum\n");
0377             r = -EINVAL;
0378             goto out;
0379         }
0380     }
0381 
0382     info = &bhdr->table_list[HARVEST_INFO];
0383     offset = le16_to_cpu(info->offset);
0384     checksum = le16_to_cpu(info->checksum);
0385 
0386     if (offset) {
0387         struct harvest_info_header *hhdr =
0388             (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
0389 
0390         if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
0391             dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
0392             r = -EINVAL;
0393             goto out;
0394         }
0395 
0396         if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
0397                               sizeof(struct harvest_table), checksum)) {
0398             dev_err(adev->dev, "invalid harvest data table checksum\n");
0399             r = -EINVAL;
0400             goto out;
0401         }
0402     }
0403 
0404     info = &bhdr->table_list[VCN_INFO];
0405     offset = le16_to_cpu(info->offset);
0406     checksum = le16_to_cpu(info->checksum);
0407 
0408     if (offset) {
0409         struct vcn_info_header *vhdr =
0410             (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
0411 
0412         if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
0413             dev_err(adev->dev, "invalid ip discovery vcn table id\n");
0414             r = -EINVAL;
0415             goto out;
0416         }
0417 
0418         if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
0419                               le32_to_cpu(vhdr->size_bytes), checksum)) {
0420             dev_err(adev->dev, "invalid vcn data table checksum\n");
0421             r = -EINVAL;
0422             goto out;
0423         }
0424     }
0425 
0426     info = &bhdr->table_list[MALL_INFO];
0427     offset = le16_to_cpu(info->offset);
0428     checksum = le16_to_cpu(info->checksum);
0429 
0430     if (0 && offset) {
0431         struct mall_info_header *mhdr =
0432             (struct mall_info_header *)(adev->mman.discovery_bin + offset);
0433 
0434         if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
0435             dev_err(adev->dev, "invalid ip discovery mall table id\n");
0436             r = -EINVAL;
0437             goto out;
0438         }
0439 
0440         if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
0441                               le32_to_cpu(mhdr->size_bytes), checksum)) {
0442             dev_err(adev->dev, "invalid mall data table checksum\n");
0443             r = -EINVAL;
0444             goto out;
0445         }
0446     }
0447 
0448     return 0;
0449 
0450 out:
0451     kfree(adev->mman.discovery_bin);
0452     adev->mman.discovery_bin = NULL;
0453 
0454     return r;
0455 }
0456 
0457 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
0458 
/* Tear down the ip_discovery sysfs tree, then release the cached blob.
 * Safe to call after a failed init: kfree(NULL) is a no-op.
 */
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;	/* guard against stale use */
}
0465 
0466 static int amdgpu_discovery_validate_ip(const struct ip *ip)
0467 {
0468     if (ip->number_instance >= HWIP_MAX_INSTANCE) {
0469         DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
0470               ip->number_instance);
0471         return -EINVAL;
0472     }
0473     if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
0474         DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
0475               le16_to_cpu(ip->hw_id));
0476         return -EINVAL;
0477     }
0478 
0479     return 0;
0480 }
0481 
0482 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
0483                         uint32_t *vcn_harvest_count)
0484 {
0485     struct binary_header *bhdr;
0486     struct ip_discovery_header *ihdr;
0487     struct die_header *dhdr;
0488     struct ip *ip;
0489     uint16_t die_offset, ip_offset, num_dies, num_ips;
0490     int i, j;
0491 
0492     bhdr = (struct binary_header *)adev->mman.discovery_bin;
0493     ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
0494             le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
0495     num_dies = le16_to_cpu(ihdr->num_dies);
0496 
0497     /* scan harvest bit of all IP data structures */
0498     for (i = 0; i < num_dies; i++) {
0499         die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
0500         dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
0501         num_ips = le16_to_cpu(dhdr->num_ips);
0502         ip_offset = die_offset + sizeof(*dhdr);
0503 
0504         for (j = 0; j < num_ips; j++) {
0505             ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
0506 
0507             if (amdgpu_discovery_validate_ip(ip))
0508                 goto next_ip;
0509 
0510             if (le16_to_cpu(ip->harvest) == 1) {
0511                 switch (le16_to_cpu(ip->hw_id)) {
0512                 case VCN_HWID:
0513                     (*vcn_harvest_count)++;
0514                     if (ip->number_instance == 0)
0515                         adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
0516                     else
0517                         adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
0518                     break;
0519                 case DMU_HWID:
0520                     adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
0521                     break;
0522                 default:
0523                     break;
0524                                 }
0525                         }
0526 next_ip:
0527             ip_offset += struct_size(ip, base_address, ip->num_base_address);
0528         }
0529     }
0530 }
0531 
0532 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
0533                              uint32_t *vcn_harvest_count,
0534                              uint32_t *umc_harvest_count)
0535 {
0536     struct binary_header *bhdr;
0537     struct harvest_table *harvest_info;
0538     u16 offset;
0539     int i;
0540 
0541     bhdr = (struct binary_header *)adev->mman.discovery_bin;
0542     offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
0543 
0544     if (!offset) {
0545         dev_err(adev->dev, "invalid harvest table offset\n");
0546         return;
0547     }
0548 
0549     harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
0550 
0551     for (i = 0; i < 32; i++) {
0552         if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
0553             break;
0554 
0555         switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
0556         case VCN_HWID:
0557             (*vcn_harvest_count)++;
0558             if (harvest_info->list[i].number_instance == 0)
0559                 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
0560             else
0561                 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
0562             break;
0563         case DMU_HWID:
0564             adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
0565             break;
0566         case UMC_HWID:
0567             (*umc_harvest_count)++;
0568             break;
0569         default:
0570             break;
0571         }
0572     }
0573 }
0574 
0575 /* ================================================== */
0576 
/* One IP instance exposed under ip_discovery/die/#die/#hw_id/#instance/. */
struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8  num_instance;
	u8  major, minor, revision;	/* IP version triple from the blob */
	u8  harvest;			/* harvest bit reported by discovery */

	int num_base_addresses;
	u32 base_addr[];		/* flexible array of MMIO base addresses */
};

/* All instances sharing one HW ID, grouped under a kset. */
struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

/* One die's worth of IP blocks. */
struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
	u16 num_ips;
};
0598 
0599 /* -------------------------------------------------- */
0600 
/* sysfs attribute whose show() callback takes the owning ip_hw_instance. */
struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};
0605 
/* show() callbacks for the per-instance attributes; each emits exactly
 * one field of struct ip_hw_instance.  Names match the sysfs file names
 * generated by __ATTR_RO() below.
 */
static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

/* harvest is shown as a single hex digit, e.g. "0x1". */
static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}
0640 
0641 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
0642 {
0643     ssize_t res, at;
0644     int ii;
0645 
0646     for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
0647         /* Here we satisfy the condition that, at + size <= PAGE_SIZE.
0648          */
0649         if (at + 12 > PAGE_SIZE)
0650             break;
0651         res = sysfs_emit_at(buf, at, "0x%08X\n",
0652                     ip_hw_instance->base_addr[ii]);
0653         if (res <= 0)
0654             break;
0655         at += res;
0656     }
0657 
0658     return res < 0 ? res : at;
0659 }
0660 
/* Attribute table for the show() helpers above; each name becomes a
 * read-only sysfs file in the instance directory.
 */
static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

/* NULL-terminated mirror of ip_hw_attr[]; presumably populated from
 * ip_hw_attr elsewhere before registration (entries start NULL) — not
 * visible in this chunk, TODO confirm.
 */
static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
0677 
0678 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
0679                     struct attribute *attr,
0680                     char *buf)
0681 {
0682     struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
0683     struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
0684 
0685     if (!ip_hw_attr->show)
0686         return -EIO;
0687 
0688     return ip_hw_attr->show(ip_hw_instance, buf);
0689 }
0690 
static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

/* kobject release: frees the instance kzalloc'ed in
 * amdgpu_discovery_sysfs_ips().
 */
static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};
0707 
0708 /* -------------------------------------------------- */
0709 
#define to_ip_hw_id(x)	container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	/* All instance kobjects should already be gone when the kset dies. */
	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};
0725 
0726 /* -------------------------------------------------- */
0727 
static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

/* sysfs attribute whose show() callback takes the owning ip_die_entry. */
struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

/* Emit the number of IPs recorded for this die. */
static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this intro an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
0757 
0758 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
0759                       struct attribute *attr,
0760                       char *buf)
0761 {
0762     struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
0763     struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
0764 
0765     if (!ip_die_entry_attr->show)
0766         return -EIO;
0767 
0768     return ip_die_entry_attr->show(ip_die_entry, buf);
0769 }
0770 
/* kobject release for a die entry: its ip_kset must already be empty. */
static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};
0789 
static struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* Root of the ip_discovery sysfs hierarchy; embedded in no other object
 * and freed in ip_disc_release().
 */
struct ip_discovery_top {
	struct kobject kobj;	/* ip_discovery/ */
	struct kset die_kset;	/* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};
0805 
/* Release for the die kset: sanity-check only — the memory belongs to
 * the enclosing ip_discovery_top and is freed by ip_disc_release().
 */
static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

/* Release for the ip_discovery root: clears the device back-pointer and
 * frees the whole ip_discovery_top (including the embedded die kset).
 */
static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}
0824 
/* Populate #die/#hw_id/#instance/ sysfs nodes for one die.
 *
 * @_ip_offset: byte offset of the die's first IP entry in the blob
 * @num_ips:    number of IP entries on this die
 *
 * For each possible HW ID, scans all IP entries of the die; on the first
 * match a kset for that HW ID is registered (plus a named symlink when
 * hw_id_names[] provides one), then every matching instance gets its own
 * kobject.  Returns 0 on success or a negative errno.
 */
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					/* NOTE(review): kobject docs say a failed
					 * kset_register() should be cleaned up with
					 * kset_put(), not a bare kfree — verify.
					 */
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					/* link failure is logged but non-fatal */
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->number_instance;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest = ip->harvest;
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
				ip_hw_instance->base_addr[kk] = ip->base_address[kk];

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			/* NOTE(review): res from kobject_add() is not checked;
			 * on failure the instance would leak — confirm intent.
			 */
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			/* advance past this entry's trailing base-address array */
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}
0913 
0914 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
0915 {
0916     struct binary_header *bhdr;
0917     struct ip_discovery_header *ihdr;
0918     struct die_header *dhdr;
0919     struct kset *die_kset = &adev->ip_top->die_kset;
0920     u16 num_dies, die_offset, num_ips;
0921     size_t ip_offset;
0922     int ii, res;
0923 
0924     bhdr = (struct binary_header *)adev->mman.discovery_bin;
0925     ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
0926                           le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
0927     num_dies = le16_to_cpu(ihdr->num_dies);
0928 
0929     DRM_DEBUG("number of dies: %d\n", num_dies);
0930 
0931     for (ii = 0; ii < num_dies; ii++) {
0932         struct ip_die_entry *ip_die_entry;
0933 
0934         die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
0935         dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
0936         num_ips = le16_to_cpu(dhdr->num_ips);
0937         ip_offset = die_offset + sizeof(*dhdr);
0938 
0939         /* Add the die to the kset.
0940          *
0941          * dhdr->die_id == ii, which was checked in
0942          * amdgpu_discovery_reg_base_init().
0943          */
0944 
0945         ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
0946         if (!ip_die_entry)
0947             return -ENOMEM;
0948 
0949         ip_die_entry->num_ips = num_ips;
0950 
0951         kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
0952         ip_die_entry->ip_kset.kobj.kset = die_kset;
0953         ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
0954         res = kset_register(&ip_die_entry->ip_kset);
0955         if (res) {
0956             DRM_ERROR("Couldn't register ip_die_entry kset");
0957             kfree(ip_die_entry);
0958             return res;
0959         }
0960 
0961         amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
0962     }
0963 
0964     return 0;
0965 }
0966 
/* Build the top of the per-device discovery sysfs tree:
 *   <device>/ip_discovery/die/...
 * then recurse into the dies.
 *
 * Returns 0 on success or a negative errno; on early failure the
 * half-constructed ip_top kobject is released via kobject_put(),
 * which invokes its ktype release handler.
 */
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
    struct kset *die_kset;
    int res, ii;

    adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
    if (!adev->ip_top)
        return -ENOMEM;

    /* Back-pointer so attribute show() callbacks can reach the device. */
    adev->ip_top->adev = adev;

    res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
                   &adev->dev->kobj, "ip_discovery");
    if (res) {
        DRM_ERROR("Couldn't init and add ip_discovery/");
        goto Err;
    }

    die_kset = &adev->ip_top->die_kset;
    kobject_set_name(&die_kset->kobj, "%s", "die");
    die_kset->kobj.parent = &adev->ip_top->kobj;
    die_kset->kobj.ktype = &die_kobj_ktype;
    res = kset_register(&adev->ip_top->die_kset);
    if (res) {
        DRM_ERROR("Couldn't register die_kset");
        goto Err;
    }

    /* Populate the NULL-terminated attribute pointer array used by the
     * ip_hw_instance ktype from the static ip_hw_attr table.
     */
    for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
        ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
    ip_hw_instance_attrs[ii] = NULL;

    res = amdgpu_discovery_sysfs_recurse(adev);

    return res;
Err:
    kobject_put(&adev->ip_top->kobj);
    return res;
}
1006 
1007 /* -------------------------------------------------- */
1008 
/* Map a kset list entry back to the kobject that embeds it. */
#define list_to_kobj(el) container_of(el, struct kobject, entry)
1010 
/* Release every IP-instance kobject hanging off an ip_hw_id kset, then
 * drop the reference on the kset's own kobject (freeing ip_hw_id via
 * its ktype release handler).
 */
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
    struct list_head *el, *tmp;
    struct kset *hw_id_kset;

    hw_id_kset = &ip_hw_id->hw_id_kset;
    spin_lock(&hw_id_kset->list_lock);
    list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
        list_del_init(el);
        /* Drop the spinlock around kobject_put(): the put may run the
         * kobject's release callback, and the entry is already unlinked
         * so nothing can reach it through the list anymore.
         */
        spin_unlock(&hw_id_kset->list_lock);
        /* kobject is embedded in ip_hw_instance */
        kobject_put(list_to_kobj(el));
        spin_lock(&hw_id_kset->list_lock);
    }
    spin_unlock(&hw_id_kset->list_lock);
    kobject_put(&ip_hw_id->hw_id_kset.kobj);
}
1028 
/* Release all ip_hw_id entries registered under one die's ip_kset,
 * then drop the die entry's own kset kobject.
 */
static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
    struct list_head *el, *tmp;
    struct kset *ip_kset;

    ip_kset = &ip_die_entry->ip_kset;
    spin_lock(&ip_kset->list_lock);
    list_for_each_prev_safe(el, tmp, &ip_kset->list) {
        list_del_init(el);
        /* Unlock across the recursive free; the entry is unlinked,
         * so it is unreachable from the list while we work on it.
         */
        spin_unlock(&ip_kset->list_lock);
        amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
        spin_lock(&ip_kset->list_lock);
    }
    spin_unlock(&ip_kset->list_lock);
    kobject_put(&ip_die_entry->ip_kset.kobj);
}
1045 
/* Tear down the entire ip_discovery sysfs tree: free each die entry
 * (which recursively frees its IP and instance kobjects), then drop
 * the die kset and finally the top-level ip_discovery kobject.
 */
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
    struct list_head *el, *tmp;
    struct kset *die_kset;

    die_kset = &adev->ip_top->die_kset;
    spin_lock(&die_kset->list_lock);
    list_for_each_prev_safe(el, tmp, &die_kset->list) {
        list_del_init(el);
        /* Drop the lock while freeing; the unlinked entry cannot be
         * reached through the list by anyone else.
         */
        spin_unlock(&die_kset->list_lock);
        amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
        spin_lock(&die_kset->list_lock);
    }
    spin_unlock(&die_kset->list_lock);
    kobject_put(&adev->ip_top->die_kset.kobj);
    kobject_put(&adev->ip_top->kobj);
}
1063 
1064 /* ================================================== */
1065 
1066 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1067 {
1068     struct binary_header *bhdr;
1069     struct ip_discovery_header *ihdr;
1070     struct die_header *dhdr;
1071     struct ip *ip;
1072     uint16_t die_offset;
1073     uint16_t ip_offset;
1074     uint16_t num_dies;
1075     uint16_t num_ips;
1076     uint8_t num_base_address;
1077     int hw_ip;
1078     int i, j, k;
1079     int r;
1080 
1081     r = amdgpu_discovery_init(adev);
1082     if (r) {
1083         DRM_ERROR("amdgpu_discovery_init failed\n");
1084         return r;
1085     }
1086 
1087     bhdr = (struct binary_header *)adev->mman.discovery_bin;
1088     ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1089             le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1090     num_dies = le16_to_cpu(ihdr->num_dies);
1091 
1092     DRM_DEBUG("number of dies: %d\n", num_dies);
1093 
1094     for (i = 0; i < num_dies; i++) {
1095         die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1096         dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1097         num_ips = le16_to_cpu(dhdr->num_ips);
1098         ip_offset = die_offset + sizeof(*dhdr);
1099 
1100         if (le16_to_cpu(dhdr->die_id) != i) {
1101             DRM_ERROR("invalid die id %d, expected %d\n",
1102                     le16_to_cpu(dhdr->die_id), i);
1103             return -EINVAL;
1104         }
1105 
1106         DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1107                 le16_to_cpu(dhdr->die_id), num_ips);
1108 
1109         for (j = 0; j < num_ips; j++) {
1110             ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
1111 
1112             if (amdgpu_discovery_validate_ip(ip))
1113                 goto next_ip;
1114 
1115             num_base_address = ip->num_base_address;
1116 
1117             DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1118                   hw_id_names[le16_to_cpu(ip->hw_id)],
1119                   le16_to_cpu(ip->hw_id),
1120                   ip->number_instance,
1121                   ip->major, ip->minor,
1122                   ip->revision);
1123 
1124             if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1125                 /* Bit [5:0]: original revision value
1126                  * Bit [7:6]: en/decode capability:
1127                  *     0b00 : VCN function normally
1128                  *     0b10 : encode is disabled
1129                  *     0b01 : decode is disabled
1130                  */
1131                 adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1132                     ip->revision & 0xc0;
1133                 ip->revision &= ~0xc0;
1134                 if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
1135                     adev->vcn.num_vcn_inst++;
1136                 else
1137                     dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1138                         adev->vcn.num_vcn_inst + 1,
1139                         AMDGPU_MAX_VCN_INSTANCES);
1140             }
1141             if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1142                 le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1143                 le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1144                 le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1145                 if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
1146                     adev->sdma.num_instances++;
1147                 else
1148                     dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1149                         adev->sdma.num_instances + 1,
1150                         AMDGPU_MAX_SDMA_INSTANCES);
1151             }
1152 
1153             if (le16_to_cpu(ip->hw_id) == UMC_HWID)
1154                 adev->gmc.num_umc++;
1155 
1156             for (k = 0; k < num_base_address; k++) {
1157                 /*
1158                  * convert the endianness of base addresses in place,
1159                  * so that we don't need to convert them when accessing adev->reg_offset.
1160                  */
1161                 ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1162                 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1163             }
1164 
1165             for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1166                 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
1167                     DRM_DEBUG("set register base offset for %s\n",
1168                             hw_id_names[le16_to_cpu(ip->hw_id)]);
1169                     adev->reg_offset[hw_ip][ip->number_instance] =
1170                         ip->base_address;
1171                     /* Instance support is somewhat inconsistent.
1172                      * SDMA is a good example.  Sienna cichlid has 4 total
1173                      * SDMA instances, each enumerated separately (HWIDs
1174                      * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1175                      * but they are enumerated as multiple instances of the
1176                      * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1177                      * example.  On most chips there are multiple instances
1178                      * with the same HWID.
1179                      */
1180                     adev->ip_versions[hw_ip][ip->number_instance] =
1181                         IP_VERSION(ip->major, ip->minor, ip->revision);
1182                 }
1183             }
1184 
1185 next_ip:
1186             ip_offset += struct_size(ip, base_address, ip->num_base_address);
1187         }
1188     }
1189 
1190     amdgpu_discovery_sysfs_init(adev);
1191 
1192     return 0;
1193 }
1194 
1195 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
1196                     int *major, int *minor, int *revision)
1197 {
1198     struct binary_header *bhdr;
1199     struct ip_discovery_header *ihdr;
1200     struct die_header *dhdr;
1201     struct ip *ip;
1202     uint16_t die_offset;
1203     uint16_t ip_offset;
1204     uint16_t num_dies;
1205     uint16_t num_ips;
1206     int i, j;
1207 
1208     if (!adev->mman.discovery_bin) {
1209         DRM_ERROR("ip discovery uninitialized\n");
1210         return -EINVAL;
1211     }
1212 
1213     bhdr = (struct binary_header *)adev->mman.discovery_bin;
1214     ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1215             le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1216     num_dies = le16_to_cpu(ihdr->num_dies);
1217 
1218     for (i = 0; i < num_dies; i++) {
1219         die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1220         dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1221         num_ips = le16_to_cpu(dhdr->num_ips);
1222         ip_offset = die_offset + sizeof(*dhdr);
1223 
1224         for (j = 0; j < num_ips; j++) {
1225             ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
1226 
1227             if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
1228                 if (major)
1229                     *major = ip->major;
1230                 if (minor)
1231                     *minor = ip->minor;
1232                 if (revision)
1233                     *revision = ip->revision;
1234                 return 0;
1235             }
1236             ip_offset += struct_size(ip, base_address, ip->num_base_address);
1237         }
1238     }
1239 
1240     return -EINVAL;
1241 }
1242 
/* Determine which VCN/JPEG and UMC instances were harvested (fused
 * off) on this part and update adev->harvest_ip_mask and
 * adev->gmc.num_umc accordingly.
 */
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
    int vcn_harvest_count = 0;
    int umc_harvest_count = 0;

    /*
     * Harvest table does not fit Navi1x and legacy GPUs,
     * so read harvest bit per IP data structure to set
     * harvest configuration.
     */
    if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
        /* NOTE(review): the PCI device/revision pairs below presumably
         * identify specific Navi1x SKUs shipped with harvested VCN
         * engines -- verify against the product ID list before
         * extending this quirk.
         */
        if ((adev->pdev->device == 0x731E &&
            (adev->pdev->revision == 0xC6 ||
             adev->pdev->revision == 0xC7)) ||
            (adev->pdev->device == 0x7340 &&
             adev->pdev->revision == 0xC9) ||
            (adev->pdev->device == 0x7360 &&
             adev->pdev->revision == 0xC7))
            amdgpu_discovery_read_harvest_bit_per_ip(adev,
                &vcn_harvest_count);
    } else {
        amdgpu_discovery_read_from_harvest_table(adev,
                             &vcn_harvest_count,
                             &umc_harvest_count);
    }

    amdgpu_discovery_harvest_config_quirk(adev);

    /* Every VCN instance harvested: mask off VCN and JPEG entirely. */
    if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
        adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
        adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
    }

    if (umc_harvest_count < adev->gmc.num_umc) {
        adev->gmc.num_umc -= umc_harvest_count;
    }
}
1280 
/* Overlay of all supported GC info table layouts; the header's
 * version_major/minor fields select which view is valid.
 */
union gc_info {
    struct gc_info_v1_0 v1;
    struct gc_info_v1_1 v1_1;
    struct gc_info_v1_2 v1_2;
    struct gc_info_v2_0 v2;
};
1287 
1288 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1289 {
1290     struct binary_header *bhdr;
1291     union gc_info *gc_info;
1292     u16 offset;
1293 
1294     if (!adev->mman.discovery_bin) {
1295         DRM_ERROR("ip discovery uninitialized\n");
1296         return -EINVAL;
1297     }
1298 
1299     bhdr = (struct binary_header *)adev->mman.discovery_bin;
1300     offset = le16_to_cpu(bhdr->table_list[GC].offset);
1301 
1302     if (!offset)
1303         return 0;
1304 
1305     gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1306 
1307     switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1308     case 1:
1309         adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1310         adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1311                               le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1312         adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1313         adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1314         adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1315         adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1316         adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1317         adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1318         adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1319         adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1320         adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1321         adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1322         adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1323         adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1324         adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1325             le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1326         adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1327         if (gc_info->v1.header.version_minor >= 1) {
1328             adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1329             adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1330             adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1331         }
1332         if (gc_info->v1.header.version_minor >= 2) {
1333             adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1334             adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1335             adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1336             adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1337             adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1338             adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1339             adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1340             adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1341         }
1342         break;
1343     case 2:
1344         adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1345         adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1346         adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1347         adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1348         adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1349         adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1350         adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1351         adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1352         adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1353         adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1354         adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1355         adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1356         adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1357         adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1358         adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1359             le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1360         adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1361         break;
1362     default:
1363         dev_err(adev->dev,
1364             "Unhandled GC info table %d.%d\n",
1365             le16_to_cpu(gc_info->v1.header.version_major),
1366             le16_to_cpu(gc_info->v1.header.version_minor));
1367         return -EINVAL;
1368     }
1369     return 0;
1370 }
1371 
/* Overlay of the supported MALL info table layouts, selected by the
 * table header's version_major field.
 */
union mall_info {
    struct mall_info_v1_0 v1;
};
1375 
1376 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1377 {
1378     struct binary_header *bhdr;
1379     union mall_info *mall_info;
1380     u32 u, mall_size_per_umc, m_s_present, half_use;
1381     u64 mall_size;
1382     u16 offset;
1383 
1384     if (!adev->mman.discovery_bin) {
1385         DRM_ERROR("ip discovery uninitialized\n");
1386         return -EINVAL;
1387     }
1388 
1389     bhdr = (struct binary_header *)adev->mman.discovery_bin;
1390     offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1391 
1392     if (!offset)
1393         return 0;
1394 
1395     mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1396 
1397     switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1398     case 1:
1399         mall_size = 0;
1400         mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1401         m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1402         half_use = le32_to_cpu(mall_info->v1.m_half_use);
1403         for (u = 0; u < adev->gmc.num_umc; u++) {
1404             if (m_s_present & (1 << u))
1405                 mall_size += mall_size_per_umc * 2;
1406             else if (half_use & (1 << u))
1407                 mall_size += mall_size_per_umc / 2;
1408             else
1409                 mall_size += mall_size_per_umc;
1410         }
1411         adev->gmc.mall_size = mall_size;
1412         break;
1413     default:
1414         dev_err(adev->dev,
1415             "Unhandled MALL info table %d.%d\n",
1416             le16_to_cpu(mall_info->v1.header.version_major),
1417             le16_to_cpu(mall_info->v1.header.version_minor));
1418         return -EINVAL;
1419     }
1420     return 0;
1421 }
1422 
/* Overlay of the supported VCN info table layouts, selected by the
 * table header's version_major field.
 */
union vcn_info {
    struct vcn_info_v1_0 v1;
};
1426 
1427 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1428 {
1429     struct binary_header *bhdr;
1430     union vcn_info *vcn_info;
1431     u16 offset;
1432     int v;
1433 
1434     if (!adev->mman.discovery_bin) {
1435         DRM_ERROR("ip discovery uninitialized\n");
1436         return -EINVAL;
1437     }
1438 
1439     /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1440      * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1441      * but that may change in the future with new GPUs so keep this
1442      * check for defensive purposes.
1443      */
1444     if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1445         dev_err(adev->dev, "invalid vcn instances\n");
1446         return -EINVAL;
1447     }
1448 
1449     bhdr = (struct binary_header *)adev->mman.discovery_bin;
1450     offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1451 
1452     if (!offset)
1453         return 0;
1454 
1455     vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1456 
1457     switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1458     case 1:
1459         /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1460          * so this won't overflow.
1461          */
1462         for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1463             adev->vcn.vcn_codec_disable_mask[v] =
1464                 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1465         }
1466         break;
1467     default:
1468         dev_err(adev->dev,
1469             "Unhandled VCN info table %d.%d\n",
1470             le16_to_cpu(vcn_info->v1.header.version_major),
1471             le16_to_cpu(vcn_info->v1.header.version_minor));
1472         return -EINVAL;
1473     }
1474     return 0;
1475 }
1476 
1477 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1478 {
1479     /* what IP to use for this? */
1480     switch (adev->ip_versions[GC_HWIP][0]) {
1481     case IP_VERSION(9, 0, 1):
1482     case IP_VERSION(9, 1, 0):
1483     case IP_VERSION(9, 2, 1):
1484     case IP_VERSION(9, 2, 2):
1485     case IP_VERSION(9, 3, 0):
1486     case IP_VERSION(9, 4, 0):
1487     case IP_VERSION(9, 4, 1):
1488     case IP_VERSION(9, 4, 2):
1489         amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1490         break;
1491     case IP_VERSION(10, 1, 10):
1492     case IP_VERSION(10, 1, 1):
1493     case IP_VERSION(10, 1, 2):
1494     case IP_VERSION(10, 1, 3):
1495     case IP_VERSION(10, 1, 4):
1496     case IP_VERSION(10, 3, 0):
1497     case IP_VERSION(10, 3, 1):
1498     case IP_VERSION(10, 3, 2):
1499     case IP_VERSION(10, 3, 3):
1500     case IP_VERSION(10, 3, 4):
1501     case IP_VERSION(10, 3, 5):
1502     case IP_VERSION(10, 3, 6):
1503     case IP_VERSION(10, 3, 7):
1504         amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1505         break;
1506     case IP_VERSION(11, 0, 0):
1507     case IP_VERSION(11, 0, 1):
1508     case IP_VERSION(11, 0, 2):
1509         amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1510         break;
1511     default:
1512         dev_err(adev->dev,
1513             "Failed to add common ip block(GC_HWIP:0x%x)\n",
1514             adev->ip_versions[GC_HWIP][0]);
1515         return -EINVAL;
1516     }
1517     return 0;
1518 }
1519 
/* Register the GMC (graphics memory controller) IP block matching
 * this device's GC IP version.
 *
 * Returns 0 on success, -EINVAL for an unhandled GC version.
 */
static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
    /* use GC or MMHUB IP version */
    switch (adev->ip_versions[GC_HWIP][0]) {
    case IP_VERSION(9, 0, 1):
    case IP_VERSION(9, 1, 0):
    case IP_VERSION(9, 2, 1):
    case IP_VERSION(9, 2, 2):
    case IP_VERSION(9, 3, 0):
    case IP_VERSION(9, 4, 0):
    case IP_VERSION(9, 4, 1):
    case IP_VERSION(9, 4, 2):
        amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
        break;
    case IP_VERSION(10, 1, 10):
    case IP_VERSION(10, 1, 1):
    case IP_VERSION(10, 1, 2):
    case IP_VERSION(10, 1, 3):
    case IP_VERSION(10, 1, 4):
    case IP_VERSION(10, 3, 0):
    case IP_VERSION(10, 3, 1):
    case IP_VERSION(10, 3, 2):
    case IP_VERSION(10, 3, 3):
    case IP_VERSION(10, 3, 4):
    case IP_VERSION(10, 3, 5):
    case IP_VERSION(10, 3, 6):
    case IP_VERSION(10, 3, 7):
        amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
        break;
    case IP_VERSION(11, 0, 0):
    case IP_VERSION(11, 0, 1):
    case IP_VERSION(11, 0, 2):
        amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
        break;
    default:
        dev_err(adev->dev,
            "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
            adev->ip_versions[GC_HWIP][0]);
        return -EINVAL;
    }
    return 0;
}
1562 
1563 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1564 {
1565     switch (adev->ip_versions[OSSSYS_HWIP][0]) {
1566     case IP_VERSION(4, 0, 0):
1567     case IP_VERSION(4, 0, 1):
1568     case IP_VERSION(4, 1, 0):
1569     case IP_VERSION(4, 1, 1):
1570     case IP_VERSION(4, 3, 0):
1571         amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1572         break;
1573     case IP_VERSION(4, 2, 0):
1574     case IP_VERSION(4, 2, 1):
1575     case IP_VERSION(4, 4, 0):
1576         amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1577         break;
1578     case IP_VERSION(5, 0, 0):
1579     case IP_VERSION(5, 0, 1):
1580     case IP_VERSION(5, 0, 2):
1581     case IP_VERSION(5, 0, 3):
1582     case IP_VERSION(5, 2, 0):
1583     case IP_VERSION(5, 2, 1):
1584         amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1585         break;
1586     case IP_VERSION(6, 0, 0):
1587     case IP_VERSION(6, 0, 1):
1588     case IP_VERSION(6, 0, 2):
1589         amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1590         break;
1591     default:
1592         dev_err(adev->dev,
1593             "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1594             adev->ip_versions[OSSSYS_HWIP][0]);
1595         return -EINVAL;
1596     }
1597     return 0;
1598 }
1599 
/* Register the PSP (platform security processor) IP block matching
 * this device's MP0 firmware IP version.
 *
 * Returns 0 on success, -EINVAL for an unhandled MP0 version.
 */
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
    switch (adev->ip_versions[MP0_HWIP][0]) {
    case IP_VERSION(9, 0, 0):
        amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
        break;
    case IP_VERSION(10, 0, 0):
    case IP_VERSION(10, 0, 1):
        amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
        break;
    case IP_VERSION(11, 0, 0):
    case IP_VERSION(11, 0, 2):
    case IP_VERSION(11, 0, 4):
    case IP_VERSION(11, 0, 5):
    case IP_VERSION(11, 0, 9):
    case IP_VERSION(11, 0, 7):
    case IP_VERSION(11, 0, 11):
    case IP_VERSION(11, 0, 12):
    case IP_VERSION(11, 0, 13):
    case IP_VERSION(11, 5, 0):
        amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
        break;
    case IP_VERSION(11, 0, 8):
        amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
        break;
    case IP_VERSION(11, 0, 3):
    case IP_VERSION(12, 0, 1):
        amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
        break;
    case IP_VERSION(13, 0, 0):
    case IP_VERSION(13, 0, 1):
    case IP_VERSION(13, 0, 2):
    case IP_VERSION(13, 0, 3):
    case IP_VERSION(13, 0, 5):
    case IP_VERSION(13, 0, 7):
    case IP_VERSION(13, 0, 8):
        amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
        break;
    case IP_VERSION(13, 0, 4):
        amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
        break;
    default:
        dev_err(adev->dev,
            "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
            adev->ip_versions[MP0_HWIP][0]);
        return -EINVAL;
    }
    return 0;
}
1649 
/* Register the SMU / power-management IP block matching this device's
 * MP1 firmware IP version.
 *
 * Returns 0 on success, -EINVAL for an unhandled MP1 version.
 */
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
    switch (adev->ip_versions[MP1_HWIP][0]) {
    case IP_VERSION(9, 0, 0):
    case IP_VERSION(10, 0, 0):
    case IP_VERSION(10, 0, 1):
    case IP_VERSION(11, 0, 2):
        /* These MP1 versions use the legacy powerplay block, except
         * Arcturus, which shares MP1 11.0.2 but uses swSMU v11.
         */
        if (adev->asic_type == CHIP_ARCTURUS)
            amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
        else
            amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        break;
    case IP_VERSION(11, 0, 0):
    case IP_VERSION(11, 0, 5):
    case IP_VERSION(11, 0, 9):
    case IP_VERSION(11, 0, 7):
    case IP_VERSION(11, 0, 8):
    case IP_VERSION(11, 0, 11):
    case IP_VERSION(11, 0, 12):
    case IP_VERSION(11, 0, 13):
    case IP_VERSION(11, 5, 0):
        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
        break;
    case IP_VERSION(12, 0, 0):
    case IP_VERSION(12, 0, 1):
        amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
        break;
    case IP_VERSION(13, 0, 0):
    case IP_VERSION(13, 0, 1):
    case IP_VERSION(13, 0, 2):
    case IP_VERSION(13, 0, 3):
    case IP_VERSION(13, 0, 4):
    case IP_VERSION(13, 0, 5):
    case IP_VERSION(13, 0, 7):
    case IP_VERSION(13, 0, 8):
        amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
        break;
    default:
        dev_err(adev->dev,
            "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
            adev->ip_versions[MP1_HWIP][0]);
        return -EINVAL;
    }
    return 0;
}
1695 
/*
 * Register the display IP block for this device.
 *
 * Virtual display (and SR-IOV virtual functions) always use the
 * software VKMS block.  Otherwise, when DC support is compiled in and
 * enabled for the device, the display-manager block is added based on
 * the discovered DCE (DCN-class) or DCI (legacy DCE-class) IP version.
 *
 * Returns 0 on success, -EINVAL for an unrecognized display IP version.
 */
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
    /* Virtual display / SR-IOV VF: no real display hardware is driven. */
    if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
        amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
        return 0;
    }

    /* No DC support for this device: silently register nothing. */
    if (!amdgpu_device_has_dc_support(adev))
        return 0;

#if defined(CONFIG_DRM_AMD_DC)
    if (adev->ip_versions[DCE_HWIP][0]) {
        /* DCN-class display engines report under DCE_HWIP. */
        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(1, 0, 0):
        case IP_VERSION(1, 0, 1):
        case IP_VERSION(2, 0, 2):
        case IP_VERSION(2, 0, 0):
        case IP_VERSION(2, 0, 3):
        case IP_VERSION(2, 1, 0):
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 0, 2):
        case IP_VERSION(3, 0, 3):
        case IP_VERSION(3, 0, 1):
        case IP_VERSION(3, 1, 2):
        case IP_VERSION(3, 1, 3):
        case IP_VERSION(3, 1, 4):
        case IP_VERSION(3, 1, 5):
        case IP_VERSION(3, 1, 6):
        case IP_VERSION(3, 2, 0):
        case IP_VERSION(3, 2, 1):
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
            break;
        default:
            dev_err(adev->dev,
                "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
                adev->ip_versions[DCE_HWIP][0]);
            return -EINVAL;
        }
    } else if (adev->ip_versions[DCI_HWIP][0]) {
        /* Legacy DCE 12.x display engines report under DCI_HWIP. */
        switch (adev->ip_versions[DCI_HWIP][0]) {
        case IP_VERSION(12, 0, 0):
        case IP_VERSION(12, 0, 1):
        case IP_VERSION(12, 1, 0):
            amdgpu_device_ip_block_add(adev, &dm_ip_block);
            break;
        default:
            dev_err(adev->dev,
                "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
                adev->ip_versions[DCI_HWIP][0]);
            return -EINVAL;
        }
    }
#endif
    return 0;
}
1751 
1752 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
1753 {
1754     switch (adev->ip_versions[GC_HWIP][0]) {
1755     case IP_VERSION(9, 0, 1):
1756     case IP_VERSION(9, 1, 0):
1757     case IP_VERSION(9, 2, 1):
1758     case IP_VERSION(9, 2, 2):
1759     case IP_VERSION(9, 3, 0):
1760     case IP_VERSION(9, 4, 0):
1761     case IP_VERSION(9, 4, 1):
1762     case IP_VERSION(9, 4, 2):
1763         amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
1764         break;
1765     case IP_VERSION(10, 1, 10):
1766     case IP_VERSION(10, 1, 2):
1767     case IP_VERSION(10, 1, 1):
1768     case IP_VERSION(10, 1, 3):
1769     case IP_VERSION(10, 1, 4):
1770     case IP_VERSION(10, 3, 0):
1771     case IP_VERSION(10, 3, 2):
1772     case IP_VERSION(10, 3, 1):
1773     case IP_VERSION(10, 3, 4):
1774     case IP_VERSION(10, 3, 5):
1775     case IP_VERSION(10, 3, 6):
1776     case IP_VERSION(10, 3, 3):
1777     case IP_VERSION(10, 3, 7):
1778         amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
1779         break;
1780     case IP_VERSION(11, 0, 0):
1781     case IP_VERSION(11, 0, 1):
1782     case IP_VERSION(11, 0, 2):
1783         amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
1784         break;
1785     default:
1786         dev_err(adev->dev,
1787             "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
1788             adev->ip_versions[GC_HWIP][0]);
1789         return -EINVAL;
1790     }
1791     return 0;
1792 }
1793 
1794 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
1795 {
1796     switch (adev->ip_versions[SDMA0_HWIP][0]) {
1797     case IP_VERSION(4, 0, 0):
1798     case IP_VERSION(4, 0, 1):
1799     case IP_VERSION(4, 1, 0):
1800     case IP_VERSION(4, 1, 1):
1801     case IP_VERSION(4, 1, 2):
1802     case IP_VERSION(4, 2, 0):
1803     case IP_VERSION(4, 2, 2):
1804     case IP_VERSION(4, 4, 0):
1805         amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
1806         break;
1807     case IP_VERSION(5, 0, 0):
1808     case IP_VERSION(5, 0, 1):
1809     case IP_VERSION(5, 0, 2):
1810     case IP_VERSION(5, 0, 5):
1811         amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
1812         break;
1813     case IP_VERSION(5, 2, 0):
1814     case IP_VERSION(5, 2, 2):
1815     case IP_VERSION(5, 2, 4):
1816     case IP_VERSION(5, 2, 5):
1817     case IP_VERSION(5, 2, 6):
1818     case IP_VERSION(5, 2, 3):
1819     case IP_VERSION(5, 2, 1):
1820     case IP_VERSION(5, 2, 7):
1821         amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
1822         break;
1823     case IP_VERSION(6, 0, 0):
1824     case IP_VERSION(6, 0, 1):
1825     case IP_VERSION(6, 0, 2):
1826         amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
1827         break;
1828     default:
1829         dev_err(adev->dev,
1830             "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
1831             adev->ip_versions[SDMA0_HWIP][0]);
1832         return -EINVAL;
1833     }
1834     return 0;
1835 }
1836 
/*
 * Register the multimedia IP blocks for the discovered media engine
 * versions.  A non-zero VCE version selects the legacy UVD + VCE pairing
 * (Vega-class ASICs); otherwise the unified VCN engine (and, where
 * present, a companion JPEG engine) is used, keyed off UVD_HWIP.
 *
 * Returns 0 on success, -EINVAL for an unrecognized media IP version.
 */
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
    if (adev->ip_versions[VCE_HWIP][0]) {
        /* Legacy path: separate UVD (decode) and VCE (encode) engines. */
        switch (adev->ip_versions[UVD_HWIP][0]) {
        case IP_VERSION(7, 0, 0):
        case IP_VERSION(7, 2, 0):
            /* UVD is not supported on vega20 SR-IOV */
            if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
                amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
            break;
        default:
            dev_err(adev->dev,
                "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
                adev->ip_versions[UVD_HWIP][0]);
            return -EINVAL;
        }
        switch (adev->ip_versions[VCE_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
        case IP_VERSION(4, 1, 0):
            /* VCE is not supported on vega20 SR-IOV */
            if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
                amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
            break;
        default:
            dev_err(adev->dev,
                "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
                adev->ip_versions[VCE_HWIP][0]);
            return -EINVAL;
        }
    } else {
        /* Unified path: VCN handles both decode and encode. */
        switch (adev->ip_versions[UVD_HWIP][0]) {
        case IP_VERSION(1, 0, 0):
        case IP_VERSION(1, 0, 1):
            amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
            break;
        case IP_VERSION(2, 0, 0):
        case IP_VERSION(2, 0, 2):
        case IP_VERSION(2, 2, 0):
            amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
            /* JPEG 2.0 is skipped under SR-IOV on these parts. */
            if (!amdgpu_sriov_vf(adev))
                amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
            break;
        case IP_VERSION(2, 0, 3):
            /* Intentionally empty: no VCN/JPEG block for 2.0.3. */
            break;
        case IP_VERSION(2, 5, 0):
            amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
            amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
            break;
        case IP_VERSION(2, 6, 0):
            amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
            amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
            break;
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 0, 16):
        case IP_VERSION(3, 1, 1):
        case IP_VERSION(3, 1, 2):
        case IP_VERSION(3, 0, 2):
        case IP_VERSION(3, 0, 192):
            amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
            /* JPEG 3.0 is skipped under SR-IOV on these parts. */
            if (!amdgpu_sriov_vf(adev))
                amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
            break;
        case IP_VERSION(3, 0, 33):
            /* VCN 3.0.33 has no companion JPEG block. */
            amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
            break;
        case IP_VERSION(4, 0, 0):
        case IP_VERSION(4, 0, 2):
        case IP_VERSION(4, 0, 4):
            amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
            amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
            break;
        default:
            dev_err(adev->dev,
                "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
                adev->ip_versions[UVD_HWIP][0]);
            return -EINVAL;
        }
    }
    return 0;
}
1917 
1918 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
1919 {
1920     switch (adev->ip_versions[GC_HWIP][0]) {
1921     case IP_VERSION(10, 1, 10):
1922     case IP_VERSION(10, 1, 1):
1923     case IP_VERSION(10, 1, 2):
1924     case IP_VERSION(10, 1, 3):
1925     case IP_VERSION(10, 1, 4):
1926     case IP_VERSION(10, 3, 0):
1927     case IP_VERSION(10, 3, 1):
1928     case IP_VERSION(10, 3, 2):
1929     case IP_VERSION(10, 3, 3):
1930     case IP_VERSION(10, 3, 4):
1931     case IP_VERSION(10, 3, 5):
1932     case IP_VERSION(10, 3, 6):
1933         if (amdgpu_mes) {
1934             amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
1935             adev->enable_mes = true;
1936             if (amdgpu_mes_kiq)
1937                 adev->enable_mes_kiq = true;
1938         }
1939         break;
1940     case IP_VERSION(11, 0, 0):
1941     case IP_VERSION(11, 0, 1):
1942     case IP_VERSION(11, 0, 2):
1943         amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
1944         adev->enable_mes = true;
1945         adev->enable_mes_kiq = true;
1946         break;
1947     default:
1948         break;
1949     }
1950     return 0;
1951 }
1952 
/*
 * amdgpu_discovery_set_ip_blocks - populate the device's IP block list.
 *
 * Legacy ASICs (Vega10/12/20, Raven, Arcturus, Aldebaran) get their IP
 * versions hardcoded here; all other ASICs read them from the on-chip
 * IP-discovery table.  The versions then drive, in order: device family
 * selection, the APU flag, XGMI support, the NBIO/HDP/DF/SMUIO/LSDMA
 * callback tables, and finally registration of each software IP block
 * in hardware-initialization order.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
    int r;

    /* Stage 1: establish IP versions (hardcoded or from discovery). */
    switch (adev->asic_type) {
    case CHIP_VEGA10:
        vega10_reg_base_init(adev);
        adev->sdma.num_instances = 2;
        adev->gmc.num_umc = 4;
        adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
        adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
        adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
        adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
        adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
        adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
        adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
        adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
        adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
        adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
        adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
        break;
    case CHIP_VEGA12:
        vega10_reg_base_init(adev);
        adev->sdma.num_instances = 2;
        adev->gmc.num_umc = 4;
        adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
        adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
        adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
        adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
        adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
        adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
        adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
        adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
        adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
        adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
        adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
        adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
        adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
        adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
        adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
        break;
    case CHIP_RAVEN:
        vega10_reg_base_init(adev);
        adev->sdma.num_instances = 1;
        adev->vcn.num_vcn_inst = 1;
        adev->gmc.num_umc = 2;
        /* Raven2 (and Picasso variants carrying the flag) use newer IPs. */
        if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
            adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
            adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
            adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
            adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
            adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
            adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
            adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
            adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
            adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
            adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
            adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
            adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
            adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
            adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
            adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
        } else {
            adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
            adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
            adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
            adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
            adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
            adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
            adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
            adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
            adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
            adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
            adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
            adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
            adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
            adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
            adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
        }
        break;
    case CHIP_VEGA20:
        vega20_reg_base_init(adev);
        adev->sdma.num_instances = 2;
        adev->gmc.num_umc = 8;
        adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
        adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
        adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
        adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
        adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
        adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
        adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
        adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
        adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
        adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
        adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
        adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
        adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
        adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
        adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
        adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
        adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
        adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
        break;
    case CHIP_ARCTURUS:
        arct_reg_base_init(adev);
        adev->sdma.num_instances = 8;
        adev->vcn.num_vcn_inst = 2;
        adev->gmc.num_umc = 8;
        adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
        adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
        adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
        adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
        /* 8 SDMA instances: instance 0 under SDMA0, 1-7 under SDMA1. */
        adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
        adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
        adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
        adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
        adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
        adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
        adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
        adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
        adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
        adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
        adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
        adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
        adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
        adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
        adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
        adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
        adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
        adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
        break;
    case CHIP_ALDEBARAN:
        aldebaran_reg_base_init(adev);
        adev->sdma.num_instances = 5;
        adev->vcn.num_vcn_inst = 2;
        adev->gmc.num_umc = 4;
        adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
        adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
        adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
        adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
        adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
        adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
        adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
        adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
        adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
        adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
        adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
        adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
        adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
        adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
        adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
        adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
        adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
        adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
        adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
        adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
        break;
    default:
        /* Discovery-capable ASIC: read versions from the discovery table. */
        r = amdgpu_discovery_reg_base_init(adev);
        if (r)
            return -EINVAL;

        amdgpu_discovery_harvest_ip(adev);
        amdgpu_discovery_get_gfx_info(adev);
        amdgpu_discovery_get_mall_info(adev);
        amdgpu_discovery_get_vcn_info(adev);
        break;
    }

    /* Stage 2: map the GC version to a device family. */
    switch (adev->ip_versions[GC_HWIP][0]) {
    case IP_VERSION(9, 0, 1):
    case IP_VERSION(9, 2, 1):
    case IP_VERSION(9, 4, 0):
    case IP_VERSION(9, 4, 1):
    case IP_VERSION(9, 4, 2):
        adev->family = AMDGPU_FAMILY_AI;
        break;
    case IP_VERSION(9, 1, 0):
    case IP_VERSION(9, 2, 2):
    case IP_VERSION(9, 3, 0):
        adev->family = AMDGPU_FAMILY_RV;
        break;
    case IP_VERSION(10, 1, 10):
    case IP_VERSION(10, 1, 1):
    case IP_VERSION(10, 1, 2):
    case IP_VERSION(10, 1, 3):
    case IP_VERSION(10, 1, 4):
    case IP_VERSION(10, 3, 0):
    case IP_VERSION(10, 3, 2):
    case IP_VERSION(10, 3, 4):
    case IP_VERSION(10, 3, 5):
        adev->family = AMDGPU_FAMILY_NV;
        break;
    case IP_VERSION(10, 3, 1):
        adev->family = AMDGPU_FAMILY_VGH;
        break;
    case IP_VERSION(10, 3, 3):
        adev->family = AMDGPU_FAMILY_YC;
        break;
    case IP_VERSION(10, 3, 6):
        adev->family = AMDGPU_FAMILY_GC_10_3_6;
        break;
    case IP_VERSION(10, 3, 7):
        adev->family = AMDGPU_FAMILY_GC_10_3_7;
        break;
    case IP_VERSION(11, 0, 0):
    case IP_VERSION(11, 0, 2):
        adev->family = AMDGPU_FAMILY_GC_11_0_0;
        break;
    case IP_VERSION(11, 0, 1):
        adev->family = AMDGPU_FAMILY_GC_11_0_1;
        break;
    default:
        /* Unknown GC version: cannot drive this device at all. */
        return -EINVAL;
    }

    /* Stage 3: flag integrated (APU) parts by their GC version. */
    switch (adev->ip_versions[GC_HWIP][0]) {
    case IP_VERSION(9, 1, 0):
    case IP_VERSION(9, 2, 2):
    case IP_VERSION(9, 3, 0):
    case IP_VERSION(10, 1, 3):
    case IP_VERSION(10, 1, 4):
    case IP_VERSION(10, 3, 1):
    case IP_VERSION(10, 3, 3):
    case IP_VERSION(10, 3, 6):
    case IP_VERSION(10, 3, 7):
    case IP_VERSION(11, 0, 1):
        adev->flags |= AMD_IS_APU;
        break;
    default:
        break;
    }

    if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
        adev->gmc.xgmi.supported = true;

    /* set NBIO version */
    switch (adev->ip_versions[NBIO_HWIP][0]) {
    case IP_VERSION(6, 1, 0):
    case IP_VERSION(6, 2, 0):
        adev->nbio.funcs = &nbio_v6_1_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
        break;
    case IP_VERSION(7, 0, 0):
    case IP_VERSION(7, 0, 1):
    case IP_VERSION(2, 5, 0):
        adev->nbio.funcs = &nbio_v7_0_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
        break;
    case IP_VERSION(7, 4, 0):
    case IP_VERSION(7, 4, 1):
    case IP_VERSION(7, 4, 4):
        adev->nbio.funcs = &nbio_v7_4_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
        break;
    case IP_VERSION(7, 2, 0):
    case IP_VERSION(7, 2, 1):
    case IP_VERSION(7, 3, 0):
    case IP_VERSION(7, 5, 0):
    case IP_VERSION(7, 5, 1):
        adev->nbio.funcs = &nbio_v7_2_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
        break;
    case IP_VERSION(2, 1, 1):
    case IP_VERSION(2, 3, 0):
    case IP_VERSION(2, 3, 1):
    case IP_VERSION(2, 3, 2):
    case IP_VERSION(3, 3, 0):
    case IP_VERSION(3, 3, 1):
    case IP_VERSION(3, 3, 2):
    case IP_VERSION(3, 3, 3):
        adev->nbio.funcs = &nbio_v2_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
        break;
    case IP_VERSION(4, 3, 0):
    case IP_VERSION(4, 3, 1):
        adev->nbio.funcs = &nbio_v4_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
        break;
    case IP_VERSION(7, 7, 0):
        adev->nbio.funcs = &nbio_v7_7_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
        break;
    default:
        break;
    }

    /* Set the HDP (host data path) callback table. */
    switch (adev->ip_versions[HDP_HWIP][0]) {
    case IP_VERSION(4, 0, 0):
    case IP_VERSION(4, 0, 1):
    case IP_VERSION(4, 1, 0):
    case IP_VERSION(4, 1, 1):
    case IP_VERSION(4, 1, 2):
    case IP_VERSION(4, 2, 0):
    case IP_VERSION(4, 2, 1):
    case IP_VERSION(4, 4, 0):
        adev->hdp.funcs = &hdp_v4_0_funcs;
        break;
    case IP_VERSION(5, 0, 0):
    case IP_VERSION(5, 0, 1):
    case IP_VERSION(5, 0, 2):
    case IP_VERSION(5, 0, 3):
    case IP_VERSION(5, 0, 4):
    case IP_VERSION(5, 2, 0):
        adev->hdp.funcs = &hdp_v5_0_funcs;
        break;
    case IP_VERSION(5, 2, 1):
        adev->hdp.funcs = &hdp_v5_2_funcs;
        break;
    case IP_VERSION(6, 0, 0):
    case IP_VERSION(6, 0, 1):
        adev->hdp.funcs = &hdp_v6_0_funcs;
        break;
    default:
        break;
    }

    /* Set the DF (data fabric) callback table. */
    switch (adev->ip_versions[DF_HWIP][0]) {
    case IP_VERSION(3, 6, 0):
    case IP_VERSION(3, 6, 1):
    case IP_VERSION(3, 6, 2):
        adev->df.funcs = &df_v3_6_funcs;
        break;
    case IP_VERSION(2, 1, 0):
    case IP_VERSION(2, 1, 1):
    case IP_VERSION(2, 5, 0):
    case IP_VERSION(3, 5, 1):
    case IP_VERSION(3, 5, 2):
        adev->df.funcs = &df_v1_7_funcs;
        break;
    default:
        break;
    }

    /* Set the SMUIO callback table. */
    switch (adev->ip_versions[SMUIO_HWIP][0]) {
    case IP_VERSION(9, 0, 0):
    case IP_VERSION(9, 0, 1):
    case IP_VERSION(10, 0, 0):
    case IP_VERSION(10, 0, 1):
    case IP_VERSION(10, 0, 2):
        adev->smuio.funcs = &smuio_v9_0_funcs;
        break;
    case IP_VERSION(11, 0, 0):
    case IP_VERSION(11, 0, 2):
    case IP_VERSION(11, 0, 3):
    case IP_VERSION(11, 0, 4):
    case IP_VERSION(11, 0, 7):
    case IP_VERSION(11, 0, 8):
        adev->smuio.funcs = &smuio_v11_0_funcs;
        break;
    case IP_VERSION(11, 0, 6):
    case IP_VERSION(11, 0, 10):
    case IP_VERSION(11, 0, 11):
    case IP_VERSION(11, 5, 0):
    case IP_VERSION(13, 0, 1):
    case IP_VERSION(13, 0, 9):
    case IP_VERSION(13, 0, 10):
        adev->smuio.funcs = &smuio_v11_0_6_funcs;
        break;
    case IP_VERSION(13, 0, 2):
        adev->smuio.funcs = &smuio_v13_0_funcs;
        break;
    case IP_VERSION(13, 0, 6):
    case IP_VERSION(13, 0, 8):
        adev->smuio.funcs = &smuio_v13_0_6_funcs;
        break;
    default:
        break;
    }

    /* Set the LSDMA callback table (SDMA 6.x parts only). */
    switch (adev->ip_versions[LSDMA_HWIP][0]) {
    case IP_VERSION(6, 0, 0):
    case IP_VERSION(6, 0, 1):
    case IP_VERSION(6, 0, 2):
        adev->lsdma.funcs = &lsdma_v6_0_funcs;
        break;
    default:
        break;
    }

    /* Stage 4: register the software IP blocks in init order. */
    r = amdgpu_discovery_set_common_ip_blocks(adev);
    if (r)
        return r;

    r = amdgpu_discovery_set_gmc_ip_blocks(adev);
    if (r)
        return r;

    /* For SR-IOV, PSP needs to be initialized before IH */
    if (amdgpu_sriov_vf(adev)) {
        r = amdgpu_discovery_set_psp_ip_blocks(adev);
        if (r)
            return r;
        r = amdgpu_discovery_set_ih_ip_blocks(adev);
        if (r)
            return r;
    } else {
        r = amdgpu_discovery_set_ih_ip_blocks(adev);
        if (r)
            return r;

        /* PSP is only needed when firmware is loaded through it. */
        if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
            r = amdgpu_discovery_set_psp_ip_blocks(adev);
            if (r)
                return r;
        }
    }

    /* With PSP loading, SMU must come before display/GFX. */
    if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
        r = amdgpu_discovery_set_smu_ip_blocks(adev);
        if (r)
            return r;
    }

    r = amdgpu_discovery_set_display_ip_blocks(adev);
    if (r)
        return r;

    r = amdgpu_discovery_set_gc_ip_blocks(adev);
    if (r)
        return r;

    r = amdgpu_discovery_set_sdma_ip_blocks(adev);
    if (r)
        return r;

    /* Direct/backdoor firmware loading registers SMU after SDMA instead. */
    if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
         !amdgpu_sriov_vf(adev)) ||
        (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
        r = amdgpu_discovery_set_smu_ip_blocks(adev);
        if (r)
            return r;
    }

    r = amdgpu_discovery_set_mm_ip_blocks(adev);
    if (r)
        return r;

    r = amdgpu_discovery_set_mes_ip_blocks(adev);
    if (r)
        return r;

    return 0;
}
2405