#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj);

static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

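	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */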
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
		    amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
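
/*
 * Handle unpin events outside the interrupt handler proper.
 */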
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(work->old_abo);
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 struct drm_pending_vblank_event *event,
					 uint32_t page_flip_flags, uint32_t target,
					 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
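
	/* schedule unpin of the old buffer */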
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);
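
	/* we borrow the event spin lock for protecting flip_work */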
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}

bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};
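
	/* on hw with routers, select right port */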
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux) {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	} else {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
	}

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
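
	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_header_is_valid() can fix the last 2 bytes
	 */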
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector
		 */
		return false;
	}
	return true;
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
	.dirty = drm_atomic_helper_dirtyfb,
};

uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
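	/*
	 * if amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
	 * to avoid hangs caused by placement of scanout BOs in GTT on certain
	 * APUs, so force the BO placement to VRAM in that case.
	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
	 */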
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type) &&
	    adev->mode_info.gpu_vm_support)
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}

static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return NULL;

	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
		return lookup_format_info(dcc_retile_formats,
					  ARRAY_SIZE(dcc_retile_formats),
					  format);

	if (AMD_FMT_MOD_GET(DCC, modifier))
		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
					  format);

	/* returning NULL will cause the default format structs to be used */
	return NULL;
}

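/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 */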
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at least
	 * the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40 || metadata[0] != 1)
		return -EINVAL;

	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}

static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;
	int num_pipes = 0;
	int num_pkrs = 0;

	num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
	num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes;

	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64KiB _T */
		case 6: /* 64KiB _X */
			block_size_bits = 16;
			break;
		case 7: /* 256KiB */
			block_size_bits = 18;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX11;
		else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		switch (swizzle & 3) {
		case 0: /* Z microtiling */
			return -EINVAL;
		case 1: /* S microtiling */
			if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {
				if (!has_xor)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 2:
			if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {
				if (!has_xor && afb->base.format->cpp[0] != 4)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 3:
			break;
		}

		if (has_xor) {
			if (num_pipes == num_pkrs && num_pkrs == 0) {
				DRM_ERROR("invalid number of pipes and packers\n");
				return -EINVAL;
			}

			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX11:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode = (adev->asic_type > CHIP_RAVEN ||
						   (adev->asic_type == CHIP_RAVEN &&
						    adev->external_rev_id >= 0x81)) &&
						   adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;
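
			/*
			 * If the userspace driver uses retiling the tiling flags do not contain
			 * info on the renderable DCC buffer. Luckily the opaque metadata contains
			 * the info so we can try to extract it. The kernel does not use this info
			 * but we should convert it to a modifier plane for getfb2, so the
			 * userspace driver that gets it doesn't need to deal with the opaque
			 * metadata anymore.
			 */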
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}
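
/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */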
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
	u64 micro_tile_mode;

	/* Zero swizzle mode means linear */
	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
		return 0;

	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
	switch (micro_tile_mode) {
	case 0: /* DISPLAY */
	case 3: /* RENDER */
		return 0;
	default:
		drm_dbg_kms(afb->base.dev,
			    "Micro tile mode %llu not supported for scanout\n",
			    micro_tile_mode);
		return -EINVAL;
	}
}

static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = ilog2(cpp);
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int width_log2 = (pixel_log2 + 1) / 2;
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1 << width_log2;
	*height = 1 << height_log2;
}

static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	switch (ver) {
	case AMD_FMT_MOD_TILE_VER_GFX9: {
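		/*
		 * TODO: for pipe aligned we may need to check the alignment of the
		 * total size of the surface, which may need to be bigger than the
		 * natural alignment due to some HW workarounds
		 */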
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}
	case AMD_FMT_MOD_TILE_VER_GFX10:
	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
	case AMD_FMT_MOD_TILE_VER_GFX11: {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		if (ver >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}
	default:
		return 0;
	}
}

static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
				       const struct drm_format_info *format,
				       unsigned int block_width, unsigned int block_height,
				       unsigned int block_size_log2)
{
	unsigned int width = rfb->base.width /
		((plane && plane < format->num_planes) ? format->hsub : 1);
	unsigned int height = rfb->base.height /
		((plane && plane < format->num_planes) ? format->vsub : 1);
	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
	unsigned int block_size = 1 << block_size_log2;
	uint64_t size;

	if (rfb->base.pitches[plane] % block_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
			    rfb->base.pitches[plane], plane, block_pitch);
		return -EINVAL;
	}
	if (rfb->base.pitches[plane] < min_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is less than minimum pitch %d\n",
			    rfb->base.pitches[plane], plane, min_pitch);
		return -EINVAL;
	}

	/* Force at least natural alignment. */
	if (rfb->base.offsets[plane] % block_size) {
		drm_dbg_kms(rfb->base.dev,
			    "offset 0x%x for plane %d is not a multiple of block size 0x%x\n",
			    rfb->base.offsets[plane], plane, block_size);
		return -EINVAL;
	}

	size = rfb->base.offsets[plane] +
	       (uint64_t)rfb->base.pitches[plane] / block_pitch *
	       block_size * DIV_ROUND_UP(height, block_height);

	if (rfb->base.obj[0]->size < size) {
		drm_dbg_kms(rfb->base.dev,
			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
			    rfb->base.obj[0]->size, size, plane);
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			case DC_SW_VAR_S_X:
				block_size_log2 = 18;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	if (AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}

static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
						 struct amdgpu_framebuffer *rfb,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd,
						 struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);

	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(dev,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);

		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	if (drm_drv_uses_atomic_modeset(dev))
		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
	else
		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int ret, i;

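	/*
	 * This needs to happen before modifier conversion as that might change
	 * the number of planes.
	 */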
	for (i = 1; i < rfb->base.format->num_planes; ++i) {
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
				    i, mode_cmd->handles[0], mode_cmd->handles[i]);
			ret = -EINVAL;
			return ret;
		}
	}

	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
	if (ret)
		return ret;

	if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
			      "GFX9+ requires FB check based on format modifier\n");
		ret = check_tiling_flags_gfx6(rfb);
		if (ret)
			return ret;
	}

	if (!dev->mode_config.fb_modifiers_not_supported &&
	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
		ret = convert_tiling_flags_to_modifier(rfb);
		if (ret) {
			drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
				    rfb->tiling_flags);
			return ret;
		}
	}

	ret = amdgpu_display_verify_sizes(rfb);
	if (ret)
		return ret;

	for (i = 0; i < rfb->base.format->num_planes; ++i) {
		drm_gem_object_get(rfb->base.obj[0]);
		rfb->base.obj[i] = rfb->base.obj[0];
	}

	return 0;
}

struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint32_t domains;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		drm_dbg_kms(dev,
			    "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
			    mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

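	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */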
	bo = gem_to_amdgpu_bo(obj);
	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
		drm_gem_object_put(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
						    mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put(obj);
		return ERR_PTR(ret);
	}

	drm_gem_object_put(obj);
	return &amdgpu_fb->base;
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? DCE3.0 supports spatial and temporal dithering */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev_to_drm(adev));

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	if (amdgpu_device_has_dc_support(adev)) {
		adev->mode_info.abm_level_property =
			drm_property_create_range(adev_to_drm(adev), 0,
						  "abm level", 0, 4);
		if (!adev->mode_info.abm_level_property)
			return -ENOMEM;
	}

	return 0;
}

void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080i/p */
		return true;
	else
		return false;
}

bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					    const struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;

		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
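
/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant
 * but unknown small number of scanlines wrt. real scanout position.
 */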
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = drm_to_adev(dev);

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}
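
	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */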
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >= 0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}

bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
				      bool in_vblank_irq, int *vpos,
				      int *hpos, ktime_t *stime, ktime_t *etime,
				      const struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;

	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
						  stime, etime, mode);
}

static bool
amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_fb_helper *fb_helper = dev->fb_helper;

	if (!fb_helper || !fb_helper->buffer)
		return false;

	if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
		return false;

	return true;
}

int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_OFF);
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL)
			continue;

		robj = gem_to_amdgpu_bo(fb->obj[0]);
		if (!amdgpu_display_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	return 0;
}

int amdgpu_display_resume_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	int r;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
				if (r != 0)
					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	drm_helper_resume_force_mode(dev);

	/* turn on display hw */
	drm_modeset_lock_all(dev);

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_ON);
	drm_connector_list_iter_end(&iter);

	drm_modeset_unlock_all(dev);

	return 0;
}