0001
0002
0003 #include <drm/drm_atomic_helper.h>
0004 #include <drm/drm_simple_kms_helper.h>
0005 #include <drm/drm_vblank.h>
0006
0007 #include "amdgpu.h"
0008 #ifdef CONFIG_DRM_AMDGPU_SI
0009 #include "dce_v6_0.h"
0010 #endif
0011 #ifdef CONFIG_DRM_AMDGPU_CIK
0012 #include "dce_v8_0.h"
0013 #endif
0014 #include "dce_v10_0.h"
0015 #include "dce_v11_0.h"
0016 #include "ivsrcid/ivsrcid_vislands30.h"
0017 #include "amdgpu_vkms.h"
0018 #include "amdgpu_display.h"
0019 #include "atom.h"
0020 #include "amdgpu_irq.h"
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
/* Pixel formats exposed by the virtual primary plane (single format only). */
static const u32 amdgpu_vkms_formats[] = {
	DRM_FORMAT_XRGB8888,
};
0043
/**
 * amdgpu_vkms_vblank_simulate - hrtimer callback emulating a vblank interrupt
 * @timer: per-CRTC vblank timer embedded in struct amdgpu_crtc
 *
 * Re-arms the timer for the next frame period and hands the event to the DRM
 * vblank core.  Always returns HRTIMER_RESTART; the timer only stops when it
 * is cancelled from amdgpu_vkms_disable_vblank().
 */
static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
	struct drm_crtc *crtc = &amdgpu_crtc->base;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	u64 ret_overrun;
	bool ret;

	/*
	 * hrtimer_forward_now() returns how many periods elapsed since the
	 * last expiry; anything other than 1 means we missed frame deadlines.
	 */
	ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
					  output->period_ns);
	if (ret_overrun != 1)
		DRM_WARN("%s: vblank timer overrun\n", __func__);

	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("amdgpu_vkms failure on handling vblank");

	return HRTIMER_RESTART;
}
0063
0064 static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
0065 {
0066 struct drm_device *dev = crtc->dev;
0067 unsigned int pipe = drm_crtc_index(crtc);
0068 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
0069 struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
0070 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
0071
0072 drm_calc_timestamping_constants(crtc, &crtc->mode);
0073
0074 out->period_ns = ktime_set(0, vblank->framedur_ns);
0075 hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);
0076
0077 return 0;
0078 }
0079
/* Stop vblank emulation by cancelling the per-CRTC hrtimer. */
static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	hrtimer_cancel(&amdgpu_crtc->vblank_timer);
}
0086
/**
 * amdgpu_vkms_get_vblank_timestamp - report the timestamp of the last vblank
 * @crtc: CRTC being queried
 * @max_error: unused by this emulated implementation
 * @vblank_time: out parameter, filled with the computed timestamp
 * @in_vblank_irq: unused by this emulated implementation
 *
 * Derives the timestamp from the emulation hrtimer rather than from any
 * hardware scanout position.  Always returns true (timestamp is valid).
 */
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
					     int *max_error,
					     ktime_t *vblank_time,
					     bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	/* Timer not running: the current time is as good as any. */
	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);

	/*
	 * The timer expiry should always be ahead of the last recorded
	 * vblank time; equality suggests the timer was not forwarded yet.
	 */
	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * The expiry points at the upcoming timer tick; move it back one
	 * frame period so it refers to the vblank that already happened.
	 */
	*vblank_time -= output->period_ns;

	return true;
}
0119
/* CRTC ops: stock atomic helpers plus the hrtimer-based vblank emulation. */
static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = amdgpu_vkms_enable_vblank,
	.disable_vblank = amdgpu_vkms_disable_vblank,
	.get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp,
};
0131
/* Enable the CRTC: just turn on DRM vblank handling (no real hardware). */
static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}
0137
/* Disable the CRTC: turn off DRM vblank handling. */
static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}
0143
/**
 * amdgpu_vkms_crtc_atomic_flush - complete a pending commit event
 * @crtc: CRTC being flushed
 * @state: atomic state of the commit (unused here)
 *
 * Delivers the pageflip/modeset completion event.  If a vblank reference
 * can be taken (drm_crtc_vblank_get() == 0) the event is armed to fire on
 * the next emulated vblank; otherwise it is sent immediately.
 */
static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
{
	unsigned long flags;
	if (crtc->state->event) {
		/* event delivery must happen under the DRM event lock */
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		/* ownership of the event has been handed to the vblank core */
		crtc->state->event = NULL;
	}
}
0161
/* Atomic helper hooks for the virtual CRTC. */
static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
	.atomic_flush = amdgpu_vkms_crtc_atomic_flush,
	.atomic_enable = amdgpu_vkms_crtc_atomic_enable,
	.atomic_disable = amdgpu_vkms_crtc_atomic_disable,
};
0167
/**
 * amdgpu_vkms_crtc_init - initialize one virtual CRTC
 * @dev: DRM device
 * @crtc: CRTC to initialize (embedded in a struct amdgpu_crtc)
 * @primary: primary plane to bind to the CRTC
 * @cursor: cursor plane, may be NULL
 *
 * Registers the CRTC with DRM, records it in adev->mode_info.crtcs[] and
 * sets up the hrtimer used to emulate vblank interrupts.
 *
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
				 struct drm_plane *primary, struct drm_plane *cursor)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&amdgpu_vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);

	amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
	adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;

	/* no physical PLL/encoder/connector is associated with this CRTC */
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;

	/* timer is armed later in amdgpu_vkms_enable_vblank() */
	hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;

	return ret;
}
0197
/* Virtual connector ops: stock probe + atomic helpers. */
static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
0205
0206 static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
0207 {
0208 struct drm_device *dev = connector->dev;
0209 struct drm_display_mode *mode = NULL;
0210 unsigned i;
0211 static const struct mode_size {
0212 int w;
0213 int h;
0214 } common_modes[] = {
0215 { 640, 480},
0216 { 720, 480},
0217 { 800, 600},
0218 { 848, 480},
0219 {1024, 768},
0220 {1152, 768},
0221 {1280, 720},
0222 {1280, 800},
0223 {1280, 854},
0224 {1280, 960},
0225 {1280, 1024},
0226 {1440, 900},
0227 {1400, 1050},
0228 {1680, 1050},
0229 {1600, 1200},
0230 {1920, 1080},
0231 {1920, 1200},
0232 {2560, 1440},
0233 {4096, 3112},
0234 {3656, 2664},
0235 {3840, 2160},
0236 {4096, 2160},
0237 };
0238
0239 for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
0240 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
0241 drm_mode_probed_add(connector, mode);
0242 }
0243
0244 drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
0245
0246 return ARRAY_SIZE(common_modes);
0247 }
0248
/* Connector probe helpers: only mode enumeration is needed. */
static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
	.get_modes = amdgpu_vkms_conn_get_modes,
};
0252
/* Plane ops: stock atomic helpers, no driver-specific behavior. */
static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
0261
/*
 * amdgpu_vkms_plane_atomic_update - per-commit plane programming hook
 *
 * Intentionally empty: there is no scanout hardware to program for the
 * virtual display.
 */
static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
					    struct drm_atomic_state *old_state)
{
}
0267
/**
 * amdgpu_vkms_plane_atomic_check - validate the new plane state
 * @plane: plane being checked
 * @state: full atomic state of the commit
 *
 * Uses the generic plane-state checker with scaling disabled
 * (DRM_PLANE_HELPER_NO_SCALING for both min and max), positioning
 * disallowed and updates on a disabled CRTC allowed.
 *
 * Returns 0 if the state is acceptable, a negative error code otherwise.
 */
static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc_state *crtc_state;
	int ret;

	/* No fb means the plane is being disabled - nothing to validate.
	 * An fb without a CRTC should not happen at this point. */
	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret != 0)
		return ret;

	/* the primary plane must remain visible when it has a framebuffer */
	if (!new_plane_state->visible)
		return -EINVAL;

	return 0;
}
0297
/**
 * amdgpu_vkms_prepare_fb - pin the framebuffer BO before it is displayed
 * @plane: plane the framebuffer will be shown on
 * @new_state: new plane state carrying the framebuffer
 *
 * Reserves the buffer object, allocates a fence slot, pins the BO into a
 * suitable domain and binds it into GART, then records the resulting GPU
 * address in the amdgpu framebuffer.  Takes an extra BO reference that is
 * dropped in amdgpu_vkms_cleanup_fb().
 *
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
				  struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}
	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);

	/* interruptible reservation: a signal aborts with -ERESTARTSYS */
	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		dev_err(adev->dev, "allocating fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	/* cursor BOs must live in VRAM; other planes take any supported domain */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* balanced by amdgpu_bo_unref() in amdgpu_vkms_cleanup_fb() */
	amdgpu_bo_ref(rbo);

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}
0362
0363 static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
0364 struct drm_plane_state *old_state)
0365 {
0366 struct amdgpu_bo *rbo;
0367 int r;
0368
0369 if (!old_state->fb)
0370 return;
0371
0372 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
0373 r = amdgpu_bo_reserve(rbo, false);
0374 if (unlikely(r)) {
0375 DRM_ERROR("failed to reserve rbo before unpin\n");
0376 return;
0377 }
0378
0379 amdgpu_bo_unpin(rbo);
0380 amdgpu_bo_unreserve(rbo);
0381 amdgpu_bo_unref(&rbo);
0382 }
0383
/* Plane helper hooks: no-op update, strict check, BO pin/unpin fb hooks. */
static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
	.atomic_update = amdgpu_vkms_plane_atomic_update,
	.atomic_check = amdgpu_vkms_plane_atomic_check,
	.prepare_fb = amdgpu_vkms_prepare_fb,
	.cleanup_fb = amdgpu_vkms_cleanup_fb,
};
0390
0391 static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
0392 enum drm_plane_type type,
0393 int index)
0394 {
0395 struct drm_plane *plane;
0396 int ret;
0397
0398 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
0399 if (!plane)
0400 return ERR_PTR(-ENOMEM);
0401
0402 ret = drm_universal_plane_init(dev, plane, 1 << index,
0403 &amdgpu_vkms_plane_funcs,
0404 amdgpu_vkms_formats,
0405 ARRAY_SIZE(amdgpu_vkms_formats),
0406 NULL, type, NULL);
0407 if (ret) {
0408 kfree(plane);
0409 return ERR_PTR(ret);
0410 }
0411
0412 drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);
0413
0414 return plane;
0415 }
0416
/**
 * amdgpu_vkms_output_init - build one virtual display pipeline
 * @dev: DRM device
 * @output: preallocated output (holds connector, encoder and CRTC)
 * @index: index of this output, used for CRTC/plane masks
 *
 * Creates a primary plane, a CRTC, a virtual connector and a virtual
 * encoder, then wires connector -> encoder -> CRTC.  On failure every
 * object created so far is torn down via the unwind labels below.
 *
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_vkms_output_init(struct drm_device *dev, struct
				   amdgpu_vkms_output *output, int index)
{
	struct drm_connector *connector = &output->connector;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_crtc *crtc = &output->crtc.base;
	struct drm_plane *primary, *cursor = NULL;
	int ret;

	primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
	if (ret)
		goto err_crtc;

	ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init connector\n");
		goto err_connector;
	}

	drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init encoder\n");
		goto err_encoder;
	}
	/* the encoder can only drive the CRTC with the same index */
	encoder->possible_crtcs = 1 << index;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("Failed to attach connector to encoder\n");
		goto err_attach;
	}

	drm_mode_config_reset(dev);

	return 0;

	/* unwind in reverse creation order */
err_attach:
	drm_encoder_cleanup(encoder);

err_encoder:
	drm_connector_cleanup(connector);

err_connector:
	drm_crtc_cleanup(crtc);

err_crtc:
	drm_plane_cleanup(primary);

	return ret;
}
0474
/* Mode-config ops shared with the rest of amdgpu (non-static, see header). */
const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
0480
/**
 * amdgpu_vkms_sw_init - software init for the virtual display IP block
 * @handle: opaque IP-block handle (an amdgpu_device pointer)
 *
 * Allocates one amdgpu_vkms_output per CRTC, configures the DRM mode
 * config limits and creates a full connector/encoder/CRTC/plane pipeline
 * for every virtual output.
 *
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_vkms_sw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
		sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
	if (!adev->amdgpu_vkms_output)
		return -ENOMEM;

	/* 0 makes the DRM core derive the vblank counter from timestamps */
	adev_to_drm(adev)->max_vblank_count = 0;

	adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;

	adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
	adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	/* NOTE(review): error returns below leave amdgpu_vkms_output and any
	 * already-created outputs allocated -- presumably the IP framework
	 * invokes sw_fini on init failure; confirm the teardown ordering. */
	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	/* one virtual output per exposed CRTC */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
		if (r)
			return r;
	}

	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;
}
0519
/**
 * amdgpu_vkms_sw_fini - tear down the virtual display IP block
 * @handle: opaque IP-block handle (an amdgpu_device pointer)
 *
 * Cancels every per-CRTC vblank timer before releasing the mode config,
 * so no emulation callback can fire into freed state, then frees the
 * per-output array allocated in sw_init.
 *
 * Returns 0.
 */
static int amdgpu_vkms_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i = 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++)
		if (adev->mode_info.crtcs[i])
			hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

	drm_kms_helper_poll_fini(adev_to_drm(adev));
	drm_mode_config_cleanup(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = false;

	kfree(adev->mode_info.bios_hardcoded_edid);
	kfree(adev->amdgpu_vkms_output);
	return 0;
}
0538
/**
 * amdgpu_vkms_hw_init - hardware init for the virtual display IP block
 * @handle: opaque IP-block handle (an amdgpu_device pointer)
 *
 * Disables the real display controller (DCE) for ASICs that have one, so
 * the physical engine does not keep scanning out while the virtual KMS is
 * in charge.  ASICs without a DCE block (e.g. TOPAZ, HAINAN) need nothing.
 *
 * Returns 0.
 */
static int amdgpu_vkms_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		dce_v8_0_disable_dce(adev);
		break;
#endif
	case CHIP_FIJI:
	case CHIP_TONGA:
		dce_v10_0_disable_dce(adev);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_VEGAM:
		dce_v11_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE block on these ASICs - nothing to disable */
		break;
	default:
		break;
	}
	return 0;
}
0583
/* No hardware state to tear down for the virtual display. */
static int amdgpu_vkms_hw_fini(void *handle)
{
	return 0;
}
0588
/**
 * amdgpu_vkms_suspend - suspend the virtual display
 * @handle: opaque IP-block handle (an amdgpu_device pointer)
 *
 * Suspends the DRM mode config first and, only if that succeeded, runs
 * the (stub) hardware teardown.
 *
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_vkms_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = drm_mode_config_helper_suspend(adev_to_drm(adev));

	return ret ? ret : amdgpu_vkms_hw_fini(handle);
}
0599
/**
 * amdgpu_vkms_resume - resume the virtual display
 * @handle: opaque IP-block handle (an amdgpu_device pointer)
 *
 * Mirror of amdgpu_vkms_suspend(): re-runs hw_init and, if it succeeded,
 * resumes the DRM mode config.
 *
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_vkms_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = amdgpu_vkms_hw_init(handle);

	return ret ? ret : drm_mode_config_helper_resume(adev_to_drm(adev));
}
0610
/* The virtual display has no hardware, so it is always idle. */
static bool amdgpu_vkms_is_idle(void *handle)
{
	return true;
}
0615
/* Nothing to wait for - see amdgpu_vkms_is_idle(). */
static int amdgpu_vkms_wait_for_idle(void *handle)
{
	return 0;
}
0620
/* No hardware to reset; always succeeds. */
static int amdgpu_vkms_soft_reset(void *handle)
{
	return 0;
}
0625
/* Clock gating is meaningless for a virtual display; no-op. */
static int amdgpu_vkms_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state)
{
	return 0;
}
0631
/* Power gating is meaningless for a virtual display; no-op. */
static int amdgpu_vkms_set_powergating_state(void *handle,
					     enum amd_powergating_state state)
{
	return 0;
}
0637
/* IP-block callback table wiring the vkms lifecycle into the amdgpu core. */
static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
	.name = "amdgpu_vkms",
	.early_init = NULL,
	.late_init = NULL,
	.sw_init = amdgpu_vkms_sw_init,
	.sw_fini = amdgpu_vkms_sw_fini,
	.hw_init = amdgpu_vkms_hw_init,
	.hw_fini = amdgpu_vkms_hw_fini,
	.suspend = amdgpu_vkms_suspend,
	.resume = amdgpu_vkms_resume,
	.is_idle = amdgpu_vkms_is_idle,
	.wait_for_idle = amdgpu_vkms_wait_for_idle,
	.soft_reset = amdgpu_vkms_soft_reset,
	.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
	.set_powergating_state = amdgpu_vkms_set_powergating_state,
};
0654
/* Registered as the DCE-type IP block when virtual display is in use. */
const struct amdgpu_ip_block_version amdgpu_vkms_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_vkms_ip_funcs,
};
0663