Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2014 Advanced Micro Devices, Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 
0024 #include <drm/drm_fourcc.h>
0025 #include <drm/drm_vblank.h>
0026 
0027 #include "amdgpu.h"
0028 #include "amdgpu_pm.h"
0029 #include "amdgpu_i2c.h"
0030 #include "vid.h"
0031 #include "atom.h"
0032 #include "amdgpu_atombios.h"
0033 #include "atombios_crtc.h"
0034 #include "atombios_encoders.h"
0035 #include "amdgpu_pll.h"
0036 #include "amdgpu_connectors.h"
0037 #include "amdgpu_display.h"
0038 #include "dce_v10_0.h"
0039 
0040 #include "dce/dce_10_0_d.h"
0041 #include "dce/dce_10_0_sh_mask.h"
0042 #include "dce/dce_10_0_enum.h"
0043 #include "oss/oss_3_0_d.h"
0044 #include "oss/oss_3_0_sh_mask.h"
0045 #include "gmc/gmc_8_1_d.h"
0046 #include "gmc/gmc_8_1_sh_mask.h"
0047 
0048 #include "ivsrcid/ivsrcid_vislands30.h"
0049 
0050 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
0051 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
0052 
/* Per-CRTC MMIO register block offsets, indexed by CRTC id (DCE 10 parts
 * expose up to 7 display controllers).
 */
static const u32 crtc_offsets[] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	CRTC6_REGISTER_OFFSET
};
0063 
/* Per-pin MMIO register block offsets for the hotplug-detect (HPD) pads,
 * indexed by enum amdgpu_hpd_id.
 */
static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};
0073 
/* Per-encoder MMIO register block offsets for the digital (DIG) blocks. */
static const uint32_t dig_offsets[] = {
	DIG0_REGISTER_OFFSET,
	DIG1_REGISTER_OFFSET,
	DIG2_REGISTER_OFFSET,
	DIG3_REGISTER_OFFSET,
	DIG4_REGISTER_OFFSET,
	DIG5_REGISTER_OFFSET,
	DIG6_REGISTER_OFFSET
};
0083 
/* Display interrupt status register and the vblank/vline/hpd bit masks
 * within it, one entry per display controller.  The status for controllers
 * beyond the first lives in the DISP_INTERRUPT_STATUS_CONTINUE* registers.
 */
static const struct {
	uint32_t        reg;	/* interrupt status register for this pipe */
	uint32_t        vblank;	/* vblank interrupt pending mask */
	uint32_t        vline;	/* vline interrupt pending mask */
	uint32_t        hpd;	/* hotplug interrupt pending mask */

} interrupt_status_offsets[] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
0121 
/* Golden register settings for Tonga, as {register, mask, value} triplets
 * consumed by amdgpu_device_program_register_sequence().
 */
static const u32 golden_settings_tonga_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
};
0129 
/* Tonga clock/power-gating init, as {register, mask, value} triplets. */
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
0135 
/* Golden register settings for Fiji, as {register, mask, value} triplets. */
static const u32 golden_settings_fiji_a10[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
};
0143 
/* Fiji clock/power-gating init, as {register, mask, value} triplets. */
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
0149 
/**
 * dce_v10_0_init_golden_registers - apply ASIC-specific golden settings
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the clock-gating init and golden register sequences for the
 * detected ASIC.  Unknown ASIC types are left untouched.
 */
static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	default:
		break;
	}
}
0173 
/**
 * dce_v10_0_audio_endpt_rreg - read an Azalia audio endpoint register
 *
 * @adev: amdgpu_device pointer
 * @block_offset: offset of this endpoint's register block
 * @reg: endpoint register index to read
 *
 * Azalia endpoint registers are accessed indirectly through an
 * index/data pair; the spinlock keeps the index write and the data
 * read atomic with respect to other endpoint accesses.
 * Returns the register value.
 */
static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}
0187 
/**
 * dce_v10_0_audio_endpt_wreg - write an Azalia audio endpoint register
 *
 * @adev: amdgpu_device pointer
 * @block_offset: offset of this endpoint's register block
 * @reg: endpoint register index to write
 * @v: value to write
 *
 * Indirect index/data write; the spinlock keeps the pair atomic with
 * respect to other endpoint accesses.
 */
static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
0198 
0199 static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
0200 {
0201     if (crtc >= adev->mode_info.num_crtc)
0202         return 0;
0203     else
0204         return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
0205 }
0206 
0207 static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
0208 {
0209     unsigned i;
0210 
0211     /* Enable pflip interrupts */
0212     for (i = 0; i < adev->mode_info.num_crtc; i++)
0213         amdgpu_irq_get(adev, &adev->pageflip_irq, i);
0214 }
0215 
0216 static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
0217 {
0218     unsigned i;
0219 
0220     /* Disable pflip interrupts */
0221     for (i = 0; i < adev->mode_info.num_crtc; i++)
0222         amdgpu_irq_put(adev, &adev->pageflip_irq, i);
0223 }
0224 
/**
 * dce_v10_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.  The write to the low address register is what
 * latches the new surface, so it must come last.
 */
static void dce_v10_0_page_flip(struct amdgpu_device *adev,
				int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
	u32 tmp;

	/* flip at hsync for async, default is vsync */
	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* update pitch (in pixels, hence the divide by bytes-per-pixel) */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the primary scanout address */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write (read back to flush the posted MMIO write) */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
0260 
0261 static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
0262                     u32 *vbl, u32 *position)
0263 {
0264     if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
0265         return -EINVAL;
0266 
0267     *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
0268     *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
0269 
0270     return 0;
0271 }
0272 
0273 /**
0274  * dce_v10_0_hpd_sense - hpd sense callback.
0275  *
0276  * @adev: amdgpu_device pointer
0277  * @hpd: hpd (hotplug detect) pin
0278  *
0279  * Checks if a digital monitor is connected (evergreen+).
0280  * Returns true if connected, false if not connected.
0281  */
0282 static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
0283                    enum amdgpu_hpd_id hpd)
0284 {
0285     bool connected = false;
0286 
0287     if (hpd >= adev->mode_info.num_hpd)
0288         return connected;
0289 
0290     if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
0291         DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
0292         connected = true;
0293 
0294     return connected;
0295 }
0296 
0297 /**
0298  * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
0299  *
0300  * @adev: amdgpu_device pointer
0301  * @hpd: hpd (hotplug detect) pin
0302  *
0303  * Set the polarity of the hpd pin (evergreen+).
0304  */
0305 static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
0306                       enum amdgpu_hpd_id hpd)
0307 {
0308     u32 tmp;
0309     bool connected = dce_v10_0_hpd_sense(adev, hpd);
0310 
0311     if (hpd >= adev->mode_info.num_hpd)
0312         return;
0313 
0314     tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
0315     if (connected)
0316         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
0317     else
0318         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
0319     WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
0320 }
0321 
/**
 * dce_v10_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* connector has no usable HPD pin on this ASIC */
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		/* enable the HPD pin itself */
		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		/* debounce connect/disconnect transitions so cable wiggle
		 * doesn't generate spurious interrupts */
		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_CONNECT_INT_DELAY,
				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_DISCONNECT_INT_DELAY,
				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		/* arm the interrupt polarity and take an irq reference */
		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}
0376 
/**
 * dce_v10_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* connector has no usable HPD pin on this ASIC */
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		/* disable the pin and drop the irq reference taken in
		 * dce_v10_0_hpd_init() */
		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}
0408 
/* Return the GPIO register used to read the raw HPD pin states. */
static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}
0413 
/**
 * dce_v10_0_is_display_hung - check whether any enabled CRTC is hung
 *
 * @adev: amdgpu_device pointer
 *
 * Samples the H/V position counter of each enabled CRTC, then polls up
 * to 10 times (100 us apart) waiting for every counter to move.  A CRTC
 * whose counter never changes is considered hung.
 * Returns true if at least one enabled CRTC appears hung.
 */
static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;		/* bitmask of CRTCs still suspect */
	u32 crtc_status[6];		/* sized for the max 6 CRTCs polled here */
	u32 i, j, tmp;

	/* snapshot the position counter of every enabled CRTC */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				/* counter moved -> this CRTC is alive */
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
0443 
0444 static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
0445                        bool render)
0446 {
0447     u32 tmp;
0448 
0449     /* Lockout access through VGA aperture*/
0450     tmp = RREG32(mmVGA_HDP_CONTROL);
0451     if (render)
0452         tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
0453     else
0454         tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
0455     WREG32(mmVGA_HDP_CONTROL, tmp);
0456 
0457     /* disable VGA render */
0458     tmp = RREG32(mmVGA_RENDER_CONTROL);
0459     if (render)
0460         tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
0461     else
0462         tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
0463     WREG32(mmVGA_RENDER_CONTROL, tmp);
0464 }
0465 
0466 static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
0467 {
0468     int num_crtc = 0;
0469 
0470     switch (adev->asic_type) {
0471     case CHIP_FIJI:
0472     case CHIP_TONGA:
0473         num_crtc = 6;
0474         break;
0475     default:
0476         num_crtc = 0;
0477     }
0478     return num_crtc;
0479 }
0480 
/**
 * dce_v10_0_disable_dce - quiesce the display engine
 *
 * @adev: amdgpu_device pointer
 *
 * Disables VGA rendering and every enabled CRTC, if the ASIC has a DCE
 * engine according to the vbios.  Used before handing the display off.
 */
void dce_v10_0_disable_dce(struct amdgpu_device *adev)
{
	/*Disable VGA render and enabled crtc, if has DCE engine*/
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v10_0_set_vga_render_state(adev, false);

		/*Disable crtc*/
		for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
									 CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				/* lock the CRTC, clear master enable, then
				 * unlock so the update takes effect */
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}
0504 
/**
 * dce_v10_0_program_fmt - program the FMT block for the encoder's depth
 *
 * @encoder: DRM encoder being configured
 *
 * Configures the CRTC's FMT (bit-depth reduction) block for the monitor's
 * bpc, using spatial dithering when the connector requests it and simple
 * truncation otherwise.  Skipped for LCD panels (handled by atom) and
 * analog DAC outputs.
 */
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	/* unknown monitor depth: leave FMT untouched */
	if (bpc == 0)
		return;

	/* FMT_SPATIAL_DITHER_DEPTH / FMT_TRUNCATE_DEPTH encode the target
	 * depth: 0 = 6 bpc, 1 = 8 bpc, 2 = 10 bpc */
	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
		}
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
		}
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
		}
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
0580 
0581 
0582 /* display watermark setup */
0583 /**
0584  * dce_v10_0_line_buffer_adjust - Set up the line buffer
0585  *
0586  * @adev: amdgpu_device pointer
0587  * @amdgpu_crtc: the selected display controller
0588  * @mode: the current display mode on the selected display
0589  * controller
0590  *
0591  * Setup up the line buffer allocation for
0592  * the selected display controller (CIK).
0593  * Returns the line buffer size in pixels.
0594  */
static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i, mem_cfg;
	u32 pipe_offset = amdgpu_crtc->crtc_id;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controllers.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need use to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		/* mem_cfg selects the LB partition split; buffer_alloc is
		 * the number of DMIF buffers given to this pipe */
		if (mode->crtc_hdisplay < 1920) {
			mem_cfg = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			mem_cfg = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		/* pipe disabled: minimal config, no DMIF buffers */
		mem_cfg = 1;
		buffer_alloc = 0;
	}

	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);

	/* wait for the hardware to acknowledge the DMIF allocation */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		/* report the LB size (in pixels) implied by mem_cfg */
		switch (mem_cfg) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
0659 
0660 /**
0661  * cik_get_number_of_dram_channels - get the number of dram channels
0662  *
0663  * @adev: amdgpu_device pointer
0664  *
0665  * Look up the number of video ram channels (CIK).
0666  * Used for display watermark bandwidth calculations
0667  * Returns the number of dram channels
0668  */
0669 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
0670 {
0671     u32 tmp = RREG32(mmMC_SHARED_CHMAP);
0672 
0673     switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
0674     case 0:
0675     default:
0676         return 1;
0677     case 1:
0678         return 2;
0679     case 2:
0680         return 4;
0681     case 3:
0682         return 8;
0683     case 4:
0684         return 3;
0685     case 5:
0686         return 6;
0687     case 6:
0688         return 10;
0689     case 7:
0690         return 12;
0691     case 8:
0692         return 16;
0693     }
0694 }
0695 
/* Inputs for the display watermark bandwidth calculations below. */
struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
0711 
/**
 * dce_v10_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK):
 * yclk (MHz) * 4 bytes/pin * channels * 0.7 efficiency.
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	/* convert yclk from kHz to MHz */
	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	/* efficiency = 7/10 */
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
0740 
/**
 * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK):
 * same as the raw bandwidth but with a worst-case 0.3 display share.
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	/* convert yclk from kHz to MHz */
	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
0769 
0770 /**
0771  * dce_v10_0_data_return_bandwidth - get the data return bandwidth
0772  *
0773  * @wm: watermark calculation data
0774  *
0775  * Calculate the data return bandwidth used for display (CIK).
0776  * Used for display watermark bandwidth calculations
0777  * Returns the data return bandwidth in MBytes/s
0778  */
0779 static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
0780 {
0781     /* Calculate the display Data return Bandwidth */
0782     fixed20_12 return_efficiency; /* 0.8 */
0783     fixed20_12 sclk, bandwidth;
0784     fixed20_12 a;
0785 
0786     a.full = dfixed_const(1000);
0787     sclk.full = dfixed_const(wm->sclk);
0788     sclk.full = dfixed_div(sclk, a);
0789     a.full = dfixed_const(10);
0790     return_efficiency.full = dfixed_const(8);
0791     return_efficiency.full = dfixed_div(return_efficiency, a);
0792     a.full = dfixed_const(32);
0793     bandwidth.full = dfixed_mul(a, sclk);
0794     bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
0795 
0796     return dfixed_trunc(bandwidth);
0797 }
0798 
0799 /**
0800  * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
0801  *
0802  * @wm: watermark calculation data
0803  *
0804  * Calculate the dmif bandwidth used for display (CIK).
0805  * Used for display watermark bandwidth calculations
0806  * Returns the dmif bandwidth in MBytes/s
0807  */
static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	/* convert disp_clk from kHz to MHz */
	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	/* 32 bytes requested per display clock */
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	/* request efficiency = 8/10 */
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
0829 
0830 /**
0831  * dce_v10_0_available_bandwidth - get the min available bandwidth
0832  *
0833  * @wm: watermark calculation data
0834  *
0835  * Calculate the min available bandwidth used for display (CIK).
0836  * Used for display watermark bandwidth calculations
0837  * Returns the min available bandwidth in MBytes/s
0838  */
0839 static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
0840 {
0841     /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
0842     u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
0843     u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
0844     u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);
0845 
0846     return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
0847 }
0848 
0849 /**
0850  * dce_v10_0_average_bandwidth - get the average available bandwidth
0851  *
0852  * @wm: watermark calculation data
0853  *
0854  * Calculate the average available bandwidth used for display (CIK).
0855  * Used for display watermark bandwidth calculations
0856  * Returns the average available bandwidth in MBytes/s
0857  */
0858 static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
0859 {
0860     /* Calculate the display mode Average Bandwidth
0861      * DisplayMode should contain the source and destination dimensions,
0862      * timing, etc.
0863      */
0864     fixed20_12 bpp;
0865     fixed20_12 line_time;
0866     fixed20_12 src_width;
0867     fixed20_12 bandwidth;
0868     fixed20_12 a;
0869 
0870     a.full = dfixed_const(1000);
0871     line_time.full = dfixed_const(wm->active_time + wm->blank_time);
0872     line_time.full = dfixed_div(line_time, a);
0873     bpp.full = dfixed_const(wm->bytes_per_pixel);
0874     src_width.full = dfixed_const(wm->src_width);
0875     bandwidth.full = dfixed_mul(src_width, bpp);
0876     bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
0877     bandwidth.full = dfixed_div(bandwidth, line_time);
0878 
0879     return dfixed_trunc(bandwidth);
0880 }
0881 
0882 /**
0883  * dce_v10_0_latency_watermark - get the latency watermark
0884  *
0885  * @wm: watermark calculation data
0886  *
0887  * Calculate the latency watermark (CIK).
0888  * Used for display watermark bandwidth calculations
0889  * Returns the latency watermark in ns
0890  */
0891 static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
0892 {
0893     /* First calculate the latency in ns */
0894     u32 mc_latency = 2000; /* 2000 ns. */
0895     u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
0896     u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
0897     u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
0898     u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
0899     u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
0900         (wm->num_heads * cursor_line_pair_return_time);
0901     u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
0902     u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
0903     u32 tmp, dmif_size = 12288;
0904     fixed20_12 a, b, c;
0905 
0906     if (wm->num_heads == 0)
0907         return 0;
0908 
0909     a.full = dfixed_const(2);
0910     b.full = dfixed_const(1);
0911     if ((wm->vsc.full > a.full) ||
0912         ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
0913         (wm->vtaps >= 5) ||
0914         ((wm->vsc.full >= a.full) && wm->interlaced))
0915         max_src_lines_per_dst_line = 4;
0916     else
0917         max_src_lines_per_dst_line = 2;
0918 
0919     a.full = dfixed_const(available_bandwidth);
0920     b.full = dfixed_const(wm->num_heads);
0921     a.full = dfixed_div(a, b);
0922     tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
0923     tmp = min(dfixed_trunc(a), tmp);
0924 
0925     lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
0926 
0927     a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
0928     b.full = dfixed_const(1000);
0929     c.full = dfixed_const(lb_fill_bw);
0930     b.full = dfixed_div(c, b);
0931     a.full = dfixed_div(a, b);
0932     line_fill_time = dfixed_trunc(a);
0933 
0934     if (line_fill_time < wm->active_time)
0935         return latency;
0936     else
0937         return latency + (line_fill_time - wm->active_time);
0938 
0939 }
0940 
0941 /**
0942  * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
0943  * average and available dram bandwidth
0944  *
0945  * @wm: watermark calculation data
0946  *
0947  * Check if the display average bandwidth fits in the display
0948  * dram bandwidth (CIK).
0949  * Used for display watermark bandwidth calculations
0950  * Returns true if the display fits, false if not.
0951  */
0952 static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
0953 {
0954     if (dce_v10_0_average_bandwidth(wm) <=
0955         (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
0956         return true;
0957     else
0958         return false;
0959 }
0960 
0961 /**
0962  * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
0963  * average and available bandwidth
0964  *
0965  * @wm: watermark calculation data
0966  *
0967  * Check if the display average bandwidth fits in the display
0968  * available bandwidth (CIK).
0969  * Used for display watermark bandwidth calculations
0970  * Returns true if the display fits, false if not.
0971  */
0972 static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
0973 {
0974     if (dce_v10_0_average_bandwidth(wm) <=
0975         (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
0976         return true;
0977     else
0978         return false;
0979 }
0980 
0981 /**
0982  * dce_v10_0_check_latency_hiding - check latency hiding
0983  *
0984  * @wm: watermark calculation data
0985  *
0986  * Check latency hiding (CIK).
0987  * Used for display watermark bandwidth calculations
0988  * Returns true if the display fits, false if not.
0989  */
0990 static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
0991 {
0992     u32 lb_partitions = wm->lb_size / wm->src_width;
0993     u32 line_time = wm->active_time + wm->blank_time;
0994     u32 latency_tolerant_lines;
0995     u32 latency_hiding;
0996     fixed20_12 a;
0997 
0998     a.full = dfixed_const(1);
0999     if (wm->vsc.full > a.full)
1000         latency_tolerant_lines = 1;
1001     else {
1002         if (lb_partitions <= (wm->vtaps + 1))
1003             latency_tolerant_lines = 1;
1004         else
1005             latency_tolerant_lines = 2;
1006     }
1007 
1008     latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1009 
1010     if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
1011         return true;
1012     else
1013         return false;
1014 }
1015 
1016 /**
1017  * dce_v10_0_program_watermarks - program display watermarks
1018  *
1019  * @adev: amdgpu_device pointer
1020  * @amdgpu_crtc: the selected display controller
1021  * @lb_size: line buffer size
1022  * @num_heads: number of display controllers in use
1023  *
1024  * Calculate and program the display watermarks for the
1025  * selected display controller (CIK).
1026  */
1027 static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1028                     struct amdgpu_crtc *amdgpu_crtc,
1029                     u32 lb_size, u32 num_heads)
1030 {
1031     struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1032     struct dce10_wm_params wm_low, wm_high;
1033     u32 active_time;
1034     u32 line_time = 0;
1035     u32 latency_watermark_a = 0, latency_watermark_b = 0;
1036     u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1037 
1038     if (amdgpu_crtc->base.enabled && num_heads && mode) {
1039         active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1040                         (u32)mode->clock);
1041         line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1042                       (u32)mode->clock);
1043         line_time = min(line_time, (u32)65535);
1044 
1045         /* watermark for high clocks */
1046         if (adev->pm.dpm_enabled) {
1047             wm_high.yclk =
1048                 amdgpu_dpm_get_mclk(adev, false) * 10;
1049             wm_high.sclk =
1050                 amdgpu_dpm_get_sclk(adev, false) * 10;
1051         } else {
1052             wm_high.yclk = adev->pm.current_mclk * 10;
1053             wm_high.sclk = adev->pm.current_sclk * 10;
1054         }
1055 
1056         wm_high.disp_clk = mode->clock;
1057         wm_high.src_width = mode->crtc_hdisplay;
1058         wm_high.active_time = active_time;
1059         wm_high.blank_time = line_time - wm_high.active_time;
1060         wm_high.interlaced = false;
1061         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1062             wm_high.interlaced = true;
1063         wm_high.vsc = amdgpu_crtc->vsc;
1064         wm_high.vtaps = 1;
1065         if (amdgpu_crtc->rmx_type != RMX_OFF)
1066             wm_high.vtaps = 2;
1067         wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1068         wm_high.lb_size = lb_size;
1069         wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1070         wm_high.num_heads = num_heads;
1071 
1072         /* set for high clocks */
1073         latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);
1074 
1075         /* possibly force display priority to high */
1076         /* should really do this at mode validation time... */
1077         if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1078             !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1079             !dce_v10_0_check_latency_hiding(&wm_high) ||
1080             (adev->mode_info.disp_priority == 2)) {
1081             DRM_DEBUG_KMS("force priority to high\n");
1082         }
1083 
1084         /* watermark for low clocks */
1085         if (adev->pm.dpm_enabled) {
1086             wm_low.yclk =
1087                 amdgpu_dpm_get_mclk(adev, true) * 10;
1088             wm_low.sclk =
1089                 amdgpu_dpm_get_sclk(adev, true) * 10;
1090         } else {
1091             wm_low.yclk = adev->pm.current_mclk * 10;
1092             wm_low.sclk = adev->pm.current_sclk * 10;
1093         }
1094 
1095         wm_low.disp_clk = mode->clock;
1096         wm_low.src_width = mode->crtc_hdisplay;
1097         wm_low.active_time = active_time;
1098         wm_low.blank_time = line_time - wm_low.active_time;
1099         wm_low.interlaced = false;
1100         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1101             wm_low.interlaced = true;
1102         wm_low.vsc = amdgpu_crtc->vsc;
1103         wm_low.vtaps = 1;
1104         if (amdgpu_crtc->rmx_type != RMX_OFF)
1105             wm_low.vtaps = 2;
1106         wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1107         wm_low.lb_size = lb_size;
1108         wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1109         wm_low.num_heads = num_heads;
1110 
1111         /* set for low clocks */
1112         latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);
1113 
1114         /* possibly force display priority to high */
1115         /* should really do this at mode validation time... */
1116         if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1117             !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1118             !dce_v10_0_check_latency_hiding(&wm_low) ||
1119             (adev->mode_info.disp_priority == 2)) {
1120             DRM_DEBUG_KMS("force priority to high\n");
1121         }
1122         lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1123     }
1124 
1125     /* select wm A */
1126     wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1127     tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
1128     WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1129     tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1130     tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
1131     tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1132     WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1133     /* select wm B */
1134     tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
1135     WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1136     tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1137     tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
1138     tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1139     WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1140     /* restore original selection */
1141     WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1142 
1143     /* save values for DPM */
1144     amdgpu_crtc->line_time = line_time;
1145     amdgpu_crtc->wm_high = latency_watermark_a;
1146     amdgpu_crtc->wm_low = latency_watermark_b;
1147     /* Save number of lines the linebuffer leads before the scanout */
1148     amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1149 }
1150 
1151 /**
1152  * dce_v10_0_bandwidth_update - program display watermarks
1153  *
1154  * @adev: amdgpu_device pointer
1155  *
1156  * Calculate and program the display watermarks and line
1157  * buffer allocation (CIK).
1158  */
1159 static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
1160 {
1161     struct drm_display_mode *mode = NULL;
1162     u32 num_heads = 0, lb_size;
1163     int i;
1164 
1165     amdgpu_display_update_priority(adev);
1166 
1167     for (i = 0; i < adev->mode_info.num_crtc; i++) {
1168         if (adev->mode_info.crtcs[i]->base.enabled)
1169             num_heads++;
1170     }
1171     for (i = 0; i < adev->mode_info.num_crtc; i++) {
1172         mode = &adev->mode_info.crtcs[i]->base.mode;
1173         lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1174         dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1175                         lb_size, num_heads);
1176     }
1177 }
1178 
1179 static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
1180 {
1181     int i;
1182     u32 offset, tmp;
1183 
1184     for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1185         offset = adev->mode_info.audio.pin[i].offset;
1186         tmp = RREG32_AUDIO_ENDPT(offset,
1187                      ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1188         if (((tmp &
1189         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1190         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1191             adev->mode_info.audio.pin[i].connected = false;
1192         else
1193             adev->mode_info.audio.pin[i].connected = true;
1194     }
1195 }
1196 
1197 static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
1198 {
1199     int i;
1200 
1201     dce_v10_0_audio_get_connected_pins(adev);
1202 
1203     for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1204         if (adev->mode_info.audio.pin[i].connected)
1205             return &adev->mode_info.audio.pin[i];
1206     }
1207     DRM_ERROR("No connected audio pins found!\n");
1208     return NULL;
1209 }
1210 
1211 static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1212 {
1213     struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1214     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1215     struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1216     u32 tmp;
1217 
1218     if (!dig || !dig->afmt || !dig->afmt->pin)
1219         return;
1220 
1221     tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1222     tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1223     WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1224 }
1225 
1226 static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
1227                         struct drm_display_mode *mode)
1228 {
1229     struct drm_device *dev = encoder->dev;
1230     struct amdgpu_device *adev = drm_to_adev(dev);
1231     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1232     struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1233     struct drm_connector *connector;
1234     struct drm_connector_list_iter iter;
1235     struct amdgpu_connector *amdgpu_connector = NULL;
1236     u32 tmp;
1237     int interlace = 0;
1238 
1239     if (!dig || !dig->afmt || !dig->afmt->pin)
1240         return;
1241 
1242     drm_connector_list_iter_begin(dev, &iter);
1243     drm_for_each_connector_iter(connector, &iter) {
1244         if (connector->encoder == encoder) {
1245             amdgpu_connector = to_amdgpu_connector(connector);
1246             break;
1247         }
1248     }
1249     drm_connector_list_iter_end(&iter);
1250 
1251     if (!amdgpu_connector) {
1252         DRM_ERROR("Couldn't find encoder's connector\n");
1253         return;
1254     }
1255 
1256     if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1257         interlace = 1;
1258     if (connector->latency_present[interlace]) {
1259         tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1260                     VIDEO_LIPSYNC, connector->video_latency[interlace]);
1261         tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1262                     AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1263     } else {
1264         tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1265                     VIDEO_LIPSYNC, 0);
1266         tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1267                     AUDIO_LIPSYNC, 0);
1268     }
1269     WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1270                ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1271 }
1272 
/* Program the endpoint's speaker-allocation register from the EDID
 * Speaker Allocation Data Block, falling back to stereo if the EDID
 * provides none, and mark the endpoint as an HDMI (not DP) connection.
 */
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
    struct drm_device *dev = encoder->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
    struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
    struct drm_connector *connector;
    struct drm_connector_list_iter iter;
    struct amdgpu_connector *amdgpu_connector = NULL;
    u32 tmp;
    u8 *sadb = NULL;
    int sad_count;

    if (!dig || !dig->afmt || !dig->afmt->pin)
        return;

    /* find the connector driven by this encoder */
    drm_connector_list_iter_begin(dev, &iter);
    drm_for_each_connector_iter(connector, &iter) {
        if (connector->encoder == encoder) {
            amdgpu_connector = to_amdgpu_connector(connector);
            break;
        }
    }
    drm_connector_list_iter_end(&iter);

    if (!amdgpu_connector) {
        DRM_ERROR("Couldn't find encoder's connector\n");
        return;
    }

    /* sadb is allocated by the helper on success; freed below */
    sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
    if (sad_count < 0) {
        DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
        /* treat a read failure the same as "no SADB": use the default */
        sad_count = 0;
    }

    /* program the speaker allocation */
    tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
                 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
    tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                DP_CONNECTION, 0);
    /* set HDMI mode */
    tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                HDMI_CONNECTION, 1);
    if (sad_count)
        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                    SPEAKER_ALLOCATION, sadb[0]);
    else
        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                    SPEAKER_ALLOCATION, 5); /* stereo */
    WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
               ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

    /* kfree(NULL) is a no-op, so this is safe when no SADB was read */
    kfree(sadb);
}
1328 
/* Translate the connector's EDID Short Audio Descriptors (SADs) into the
 * per-format audio descriptor registers of the encoder's audio endpoint.
 * One register is written per supported coding type; formats with no
 * matching SAD get their register written as zero.
 */
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
    struct drm_device *dev = encoder->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
    struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
    struct drm_connector *connector;
    struct drm_connector_list_iter iter;
    struct amdgpu_connector *amdgpu_connector = NULL;
    struct cea_sad *sads;
    int i, sad_count;

    /* maps each descriptor register to the HDMI coding type it describes */
    static const u16 eld_reg_to_type[][2] = {
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
        { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
    };

    if (!dig || !dig->afmt || !dig->afmt->pin)
        return;

    /* find the connector driven by this encoder */
    drm_connector_list_iter_begin(dev, &iter);
    drm_for_each_connector_iter(connector, &iter) {
        if (connector->encoder == encoder) {
            amdgpu_connector = to_amdgpu_connector(connector);
            break;
        }
    }
    drm_connector_list_iter_end(&iter);

    if (!amdgpu_connector) {
        DRM_ERROR("Couldn't find encoder's connector\n");
        return;
    }

    /* sads is allocated by the helper on success; freed below */
    sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
    if (sad_count < 0)
        DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
    if (sad_count <= 0)
        return;
    BUG_ON(!sads);

    for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
        u32 tmp = 0;
        u8 stereo_freqs = 0;
        int max_channels = -1;
        int j;

        for (j = 0; j < sad_count; j++) {
            struct cea_sad *sad = &sads[j];

            if (sad->format == eld_reg_to_type[i][1]) {
                /* keep the SAD with the highest channel count */
                if (sad->channels > max_channels) {
                    tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                MAX_CHANNELS, sad->channels);
                    tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                DESCRIPTOR_BYTE_2, sad->byte2);
                    tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                SUPPORTED_FREQUENCIES, sad->freq);
                    max_channels = sad->channels;
                }

                /* for PCM, accumulate frequencies across all matching
                 * SADs; for other formats the first match is enough
                 */
                if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
                    stereo_freqs |= sad->freq;
                else
                    break;
            }
        }

        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
        WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
    }

    kfree(sads);
}
1414 
1415 static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
1416                   struct amdgpu_audio_pin *pin,
1417                   bool enable)
1418 {
1419     if (!pin)
1420         return;
1421 
1422     WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1423                enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1424 }
1425 
/* register-block offsets of the seven DCE 10 audio endpoints */
static const u32 pin_offsets[] =
{
    AUD0_REGISTER_OFFSET,
    AUD1_REGISTER_OFFSET,
    AUD2_REGISTER_OFFSET,
    AUD3_REGISTER_OFFSET,
    AUD4_REGISTER_OFFSET,
    AUD5_REGISTER_OFFSET,
    AUD6_REGISTER_OFFSET,
};
1436 
1437 static int dce_v10_0_audio_init(struct amdgpu_device *adev)
1438 {
1439     int i;
1440 
1441     if (!amdgpu_audio)
1442         return 0;
1443 
1444     adev->mode_info.audio.enabled = true;
1445 
1446     adev->mode_info.audio.num_pins = 7;
1447 
1448     for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1449         adev->mode_info.audio.pin[i].channels = -1;
1450         adev->mode_info.audio.pin[i].rate = -1;
1451         adev->mode_info.audio.pin[i].bits_per_sample = -1;
1452         adev->mode_info.audio.pin[i].status_bits = 0;
1453         adev->mode_info.audio.pin[i].category_code = 0;
1454         adev->mode_info.audio.pin[i].connected = false;
1455         adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1456         adev->mode_info.audio.pin[i].id = i;
1457         /* disable audio.  it will be set up later */
1458         /* XXX remove once we switch to ip funcs */
1459         dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1460     }
1461 
1462     return 0;
1463 }
1464 
1465 static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
1466 {
1467     int i;
1468 
1469     if (!amdgpu_audio)
1470         return;
1471 
1472     if (!adev->mode_info.audio.enabled)
1473         return;
1474 
1475     for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1476         dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1477 
1478     adev->mode_info.audio.enabled = false;
1479 }
1480 
1481 /*
1482  * update the N and CTS parameters for a given pixel clock rate
1483  */
1484 static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1485 {
1486     struct drm_device *dev = encoder->dev;
1487     struct amdgpu_device *adev = drm_to_adev(dev);
1488     struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1489     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1490     struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1491     u32 tmp;
1492 
1493     tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1494     tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1495     WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1496     tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1497     tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1498     WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1499 
1500     tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1501     tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1502     WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1503     tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1504     tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1505     WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1506 
1507     tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1508     tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1509     WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1510     tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1511     tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1512     WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1513 
1514 }
1515 
1516 /*
1517  * build a HDMI Video Info Frame
1518  */
1519 static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1520                            void *buffer, size_t size)
1521 {
1522     struct drm_device *dev = encoder->dev;
1523     struct amdgpu_device *adev = drm_to_adev(dev);
1524     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1525     struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1526     uint8_t *frame = buffer + 3;
1527     uint8_t *header = buffer;
1528 
1529     WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1530         frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1531     WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1532         frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1533     WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1534         frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1535     WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1536         frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1537 }
1538 
/* Program audio DTO0 so the audio clock tracks the pixel clock:
 * the 24 MHz reference over the target pixel clock is expressed as the
 * exact rational phase/module pair written below.
 */
static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
    struct drm_device *dev = encoder->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
    struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
    u32 dto_phase = 24 * 1000; /* 24 MHz reference, in the same kHz units as clock */
    u32 dto_modulo = clock;
    u32 tmp;

    if (!dig || !dig->afmt)
        return;

    /* XXX two dtos; generally use dto0 for hdmi */
    /* Express [24MHz / target pixel clock] as an exact rational
     * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
     * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
     */
    tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
    /* tie DTO0 to the crtc driving this encoder */
    tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
                amdgpu_crtc->crtc_id);
    WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
    WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
    WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
1565 
1566 /*
1567  * update the info frames with the data from the current display mode
1568  */
/* Program the AFMT/HDMI block for @mode: audio DTO, deep color, info frame
 * transmission (AVI + audio), ACR packets and the IEC 60958 channel layout.
 * Audio is disabled on entry and re-enabled only after all registers are
 * programmed.  Silently returns if the encoder has no AFMT block or the
 * AFMT interface has not been enabled yet.
 */
static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
                  struct drm_display_mode *mode)
{
    struct drm_device *dev = encoder->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
    struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
    struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
    u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
    struct hdmi_avi_infoframe frame;
    ssize_t err;
    u32 tmp;
    int bpc = 8;	/* fall back to 8 bpc when no crtc is attached */

    if (!dig || !dig->afmt)
        return;

    /* Silent, r600_hdmi_enable will raise WARN for us */
    if (!dig->afmt->enabled)
        return;

    /* hdmi deep color mode general control packets setup, if bpc > 8 */
    if (encoder->crtc) {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        bpc = amdgpu_crtc->bpc;
    }

    /* disable audio prior to setting up hw */
    dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
    dce_v10_0_audio_enable(adev, dig->afmt->pin, false);

    /* retune the audio DTO to the new pixel clock */
    dce_v10_0_audio_set_dto(encoder, mode->clock);

    tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
    tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
    WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */

    WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);

    /* deep color: only 10 and 12 bpc enable it; everything else disables */
    tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
    switch (bpc) {
    case 0:
    case 6:
    case 8:
    case 16:
    default:
        tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
        tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
        DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
              connector->name, bpc);
        break;
    case 10:
        tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
        tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
        DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
              connector->name);
        break;
    case 12:
        tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
        tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
        DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
              connector->name);
        break;
    }
    WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);

    tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
    tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
    tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
    tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
    WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);

    tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
    /* enable audio info frames (frames won't be set until audio is enabled) */
    tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
    /* required for audio info values to be updated */
    tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
    WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

    tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
    /* required for audio info values to be updated */
    tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
    WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

    tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
    /* anything other than 0 */
    tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
    WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

    WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */

    tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
    /* set the default audio delay */
    tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
    /* should be suffient for all audio modes and small enough for all hblanks */
    tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
    WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

    tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
    /* allow 60958 channel status fields to be updated */
    tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
    WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

    /* ACR (Audio Clock Regeneration): hw CTS for deep color, sw otherwise */
    tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
    if (bpc > 8)
        /* clear SW CTS value */
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
    else
        /* select SW CTS value */
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
    /* allow hw to sent ACR packets when required */
    tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
    WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

    dce_v10_0_afmt_update_ACR(encoder, mode->clock);

    /* IEC 60958 channel status: assign channel numbers L=1, R=2, then 3..8 */
    tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
    tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
    WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

    tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
    tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
    WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

    tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
    tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
    tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
    tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
    tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
    tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
    tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
    WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

    dce_v10_0_audio_write_speaker_allocation(encoder);

    /* enable all 8 audio channels */
    WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
           (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

    dce_v10_0_afmt_audio_select_pin(encoder);
    dce_v10_0_audio_write_sad_regs(encoder);
    dce_v10_0_audio_write_latency_fields(encoder, mode);

    /* build and pack the AVI infoframe for this mode, then hand it to hw */
    err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
    if (err < 0) {
        DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
        return;
    }

    err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
    if (err < 0) {
        DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
        return;
    }

    dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

    tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
    /* enable AVI info frames */
    tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
    /* required for audio info values to be updated */
    tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
    WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

    tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
    tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
    WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

    tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
    /* send audio packets */
    tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
    WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

    /* NOTE(review): ramp control values match other DCE generations —
     * presumably audio sample ramp limits; confirm against register spec */
    WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
    WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
    WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
    WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);

    /* enable audio after to setting up hw */
    dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
}
1749 
1750 static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1751 {
1752     struct drm_device *dev = encoder->dev;
1753     struct amdgpu_device *adev = drm_to_adev(dev);
1754     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1755     struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1756 
1757     if (!dig || !dig->afmt)
1758         return;
1759 
1760     /* Silent, r600_hdmi_enable will raise WARN for us */
1761     if (enable && dig->afmt->enabled)
1762         return;
1763     if (!enable && !dig->afmt->enabled)
1764         return;
1765 
1766     if (!enable && dig->afmt->pin) {
1767         dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1768         dig->afmt->pin = NULL;
1769     }
1770 
1771     dig->afmt->enabled = enable;
1772 
1773     DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1774           enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1775 }
1776 
1777 static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
1778 {
1779     int i;
1780 
1781     for (i = 0; i < adev->mode_info.num_dig; i++)
1782         adev->mode_info.afmt[i] = NULL;
1783 
1784     /* DCE10 has audio blocks tied to DIG encoders */
1785     for (i = 0; i < adev->mode_info.num_dig; i++) {
1786         adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1787         if (adev->mode_info.afmt[i]) {
1788             adev->mode_info.afmt[i]->offset = dig_offsets[i];
1789             adev->mode_info.afmt[i]->id = i;
1790         } else {
1791             int j;
1792             for (j = 0; j < i; j++) {
1793                 kfree(adev->mode_info.afmt[j]);
1794                 adev->mode_info.afmt[j] = NULL;
1795             }
1796             return -ENOMEM;
1797         }
1798     }
1799     return 0;
1800 }
1801 
/* Free every per-DIG AFMT instance allocated by dce_v10_0_afmt_init(). */
static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
{
    int i;

    for (i = 0; i < adev->mode_info.num_dig; i++) {
        kfree(adev->mode_info.afmt[i]);
        /* clear the slot so no stale pointer survives teardown */
        adev->mode_info.afmt[i] = NULL;
    }
}
1811 
/* Per-display-controller VGA control register, indexed by crtc_id */
static const u32 vga_control_regs[6] =
{
    mmD1VGA_CONTROL,
    mmD2VGA_CONTROL,
    mmD3VGA_CONTROL,
    mmD4VGA_CONTROL,
    mmD5VGA_CONTROL,
    mmD6VGA_CONTROL,
};
1821 
1822 static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
1823 {
1824     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1825     struct drm_device *dev = crtc->dev;
1826     struct amdgpu_device *adev = drm_to_adev(dev);
1827     u32 vga_control;
1828 
1829     vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1830     if (enable)
1831         WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1832     else
1833         WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1834 }
1835 
1836 static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
1837 {
1838     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1839     struct drm_device *dev = crtc->dev;
1840     struct amdgpu_device *adev = drm_to_adev(dev);
1841 
1842     if (enable)
1843         WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1844     else
1845         WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1846 }
1847 
/* Program the scanout hardware for a new framebuffer / pan position.
 * @crtc: target crtc
 * @fb: framebuffer to scan out (used when @atomic, or for unpinning the
 *      old fb in the non-atomic path)
 * @x, @y: pan offset into the surface (aligned down to hw granularity)
 * @atomic: nonzero when called from the page-flip path where @fb is
 *          already pinned, idle and fenced — only base pointers change
 * Returns 0 on success or a negative error code.
 */
static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
                     struct drm_framebuffer *fb,
                     int x, int y, int atomic)
{
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
    struct drm_device *dev = crtc->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct drm_framebuffer *target_fb;
    struct drm_gem_object *obj;
    struct amdgpu_bo *abo;
    uint64_t fb_location, tiling_flags;
    uint32_t fb_format, fb_pitch_pixels;
    u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
    u32 pipe_config;
    u32 tmp, viewport_w, viewport_h;
    int r;
    bool bypass_lut = false;

    /* no fb bound */
    if (!atomic && !crtc->primary->fb) {
        DRM_DEBUG_KMS("No FB bound\n");
        return 0;
    }

    if (atomic)
        target_fb = fb;
    else
        target_fb = crtc->primary->fb;

    /* If atomic, assume fb object is pinned & idle & fenced and
     * just update base pointers
     */
    obj = target_fb->obj[0];
    abo = gem_to_amdgpu_bo(obj);
    r = amdgpu_bo_reserve(abo, false);
    if (unlikely(r != 0))
        return r;

    if (!atomic) {
        /* pin the buffer into VRAM so the display engine can scan it out */
        r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
        if (unlikely(r != 0)) {
            amdgpu_bo_unreserve(abo);
            return -EINVAL;
        }
    }
    fb_location = amdgpu_bo_gpu_offset(abo);

    amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
    amdgpu_bo_unreserve(abo);

    pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

    /* translate the DRM fourcc into GRPH_CONTROL depth/format fields; on
     * big-endian hosts also program the matching byte-swap mode */
    switch (target_fb->format->format) {
    case DRM_FORMAT_C8:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
        break;
    case DRM_FORMAT_XRGB4444:
    case DRM_FORMAT_ARGB4444:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
#ifdef __BIG_ENDIAN
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                    ENDIAN_8IN16);
#endif
        break;
    case DRM_FORMAT_XRGB1555:
    case DRM_FORMAT_ARGB1555:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                    ENDIAN_8IN16);
#endif
        break;
    case DRM_FORMAT_BGRX5551:
    case DRM_FORMAT_BGRA5551:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
#ifdef __BIG_ENDIAN
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                    ENDIAN_8IN16);
#endif
        break;
    case DRM_FORMAT_RGB565:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                    ENDIAN_8IN16);
#endif
        break;
    case DRM_FORMAT_XRGB8888:
    case DRM_FORMAT_ARGB8888:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                    ENDIAN_8IN32);
#endif
        break;
    case DRM_FORMAT_XRGB2101010:
    case DRM_FORMAT_ARGB2101010:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                    ENDIAN_8IN32);
#endif
        /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
        bypass_lut = true;
        break;
    case DRM_FORMAT_BGRX1010102:
    case DRM_FORMAT_BGRA1010102:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
#ifdef __BIG_ENDIAN
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                    ENDIAN_8IN32);
#endif
        /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
        bypass_lut = true;
        break;
    case DRM_FORMAT_XBGR8888:
    case DRM_FORMAT_ABGR8888:
        fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
        /* swap the red and blue crossbars to turn BGR into RGB */
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
#ifdef __BIG_ENDIAN
        fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                    ENDIAN_8IN32);
#endif
        break;
    default:
        DRM_ERROR("Unsupported screen format %p4cc\n",
              &target_fb->format->format);
        return -EINVAL;
    }

    /* propagate the buffer's tiling layout into GRPH_CONTROL */
    if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
        unsigned bankw, bankh, mtaspect, tile_split, num_banks;

        bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
        bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
        mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
        tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
        num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
                      ARRAY_2D_TILED_THIN1);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
                      tile_split);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
                      mtaspect);
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
                      ADDR_SURF_MICRO_TILING_DISPLAY);
    } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
                      ARRAY_1D_TILED_THIN1);
    }

    fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
                  pipe_config);

    dce_v10_0_vga_enable(crtc, false);

    /* Make sure surface address is updated at vertical blank rather than
     * horizontal blank
     */
    tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
                GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
    WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    /* program the 64-bit surface address (primary and secondary halves) */
    WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
           upper_32_bits(fb_location));
    WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
           upper_32_bits(fb_location));
    WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
           (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
    WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
           (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
    WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
    WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

    /*
     * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
     * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
     * retain the full precision throughout the pipeline.
     */
    tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
    if (bypass_lut)
        tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
    else
        tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
    WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);

    if (bypass_lut)
        DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

    /* full-surface scanout: no offset, window spans the whole fb */
    WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
    WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
    WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
    WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
    WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
    WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

    /* pitch is programmed in pixels, not bytes */
    fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
    WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

    dce_v10_0_grph_enable(crtc, true);

    WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
           target_fb->height);

    /* pan offset: x aligned to 4 pixels, y to 2 lines */
    x &= ~3;
    y &= ~1;
    WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
           (x << 16) | y);
    viewport_w = crtc->mode.hdisplay;
    viewport_h = (crtc->mode.vdisplay + 1) & ~1;	/* round up to even */
    WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
           (viewport_w << 16) | viewport_h);

    /* set pageflip to happen anywhere in vblank interval */
    WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

    /* unpin the framebuffer we just replaced (non-atomic path only) */
    if (!atomic && fb && fb != crtc->primary->fb) {
        abo = gem_to_amdgpu_bo(fb->obj[0]);
        r = amdgpu_bo_reserve(abo, true);
        if (unlikely(r != 0))
            return r;
        amdgpu_bo_unpin(abo);
        amdgpu_bo_unreserve(abo);
    }

    /* Bytes per pixel may have changed */
    dce_v10_0_bandwidth_update(adev);

    return 0;
}
2093 
2094 static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
2095                      struct drm_display_mode *mode)
2096 {
2097     struct drm_device *dev = crtc->dev;
2098     struct amdgpu_device *adev = drm_to_adev(dev);
2099     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2100     u32 tmp;
2101 
2102     tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2103     if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2104         tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2105     else
2106         tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2107     WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2108 }
2109 
/* Upload the crtc's gamma table into the hardware LUT and bypass/zero all
 * the other color-pipeline stages (CSC, prescale, degamma, gamut remap,
 * regamma) so the LUT is the only active color transform.
 */
static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
{
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
    struct drm_device *dev = crtc->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    u16 *r, *g, *b;
    int i;
    u32 tmp;

    DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

    /* bypass input CSC for both graphics and overlay */
    tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
    tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
    WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
    WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
    WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
    tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
    WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

    /* LUT input range: black at 0, white at 0xffff per component */
    WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
    WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
    WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

    WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
    WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
    WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

    /* enable writes to all three components, start at index 0 */
    WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
    WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

    WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
    /* gamma_store is three consecutive arrays of gamma_size u16s: R, G, B */
    r = crtc->gamma_store;
    g = r + crtc->gamma_size;
    b = g + crtc->gamma_size;
    for (i = 0; i < 256; i++) {
        /* pack the top 10 bits of each 16-bit component into 10:10:10 */
        WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
               ((*r++ & 0xffc0) << 14) |
               ((*g++ & 0xffc0) << 4) |
               (*b++ >> 6));
    }

    /* leave the remaining color stages in bypass (mode 0) */
    tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
    tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
    tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
    WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
    tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
    WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
    tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
    WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
    tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
    WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

    /* XXX match this to the depth of the crtc fmt block, move to modeset? */
    WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
    /* XXX this only needs to be programmed once per crtc at startup,
     * not sure where the best place for it is
     */
    tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
    tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
    WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
2193 
2194 static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
2195 {
2196     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2197     struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2198 
2199     switch (amdgpu_encoder->encoder_id) {
2200     case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2201         if (dig->linkb)
2202             return 1;
2203         else
2204             return 0;
2205     case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2206         if (dig->linkb)
2207             return 3;
2208         else
2209             return 2;
2210     case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2211         if (dig->linkb)
2212             return 5;
2213         else
2214             return 4;
2215     case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2216         return 6;
2217     default:
2218         DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2219         return 0;
2220     }
2221 }
2222 
2223 /**
2224  * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
2225  *
2226  * @crtc: drm crtc
2227  *
2228  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2229  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2230  * monitors a dedicated PPLL must be used.  If a particular board has
2231  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2232  * as there is no need to program the PLL itself.  If we are not able to
2233  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2234  * avoid messing up an existing monitor.
2235  *
2236  * Asic specific PLL information
2237  *
2238  * DCE 10.x
2239  * Tonga
2240  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2241  * CI
2242  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2243  *
2244  */
2245 static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
2246 {
2247     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2248     struct drm_device *dev = crtc->dev;
2249     struct amdgpu_device *adev = drm_to_adev(dev);
2250     u32 pll_in_use;
2251     int pll;
2252 
2253     if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2254         if (adev->clock.dp_extclk)
2255             /* skip PPLL programming if using ext clock */
2256             return ATOM_PPLL_INVALID;
2257         else {
2258             /* use the same PPLL for all DP monitors */
2259             pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2260             if (pll != ATOM_PPLL_INVALID)
2261                 return pll;
2262         }
2263     } else {
2264         /* use the same PPLL for all monitors with the same clock */
2265         pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2266         if (pll != ATOM_PPLL_INVALID)
2267             return pll;
2268     }
2269 
2270     /* DCE10 has PPLL0, PPLL1, and PPLL2 */
2271     pll_in_use = amdgpu_pll_get_use_mask(crtc);
2272     if (!(pll_in_use & (1 << ATOM_PPLL2)))
2273         return ATOM_PPLL2;
2274     if (!(pll_in_use & (1 << ATOM_PPLL1)))
2275         return ATOM_PPLL1;
2276     if (!(pll_in_use & (1 << ATOM_PPLL0)))
2277         return ATOM_PPLL0;
2278     DRM_ERROR("unable to allocate a PPLL\n");
2279     return ATOM_PPLL_INVALID;
2280 }
2281 
2282 static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2283 {
2284     struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2285     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2286     uint32_t cur_lock;
2287 
2288     cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2289     if (lock)
2290         cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2291     else
2292         cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2293     WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2294 }
2295 
2296 static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
2297 {
2298     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2299     struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2300     u32 tmp;
2301 
2302     tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2303     tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2304     WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2305 }
2306 
2307 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
2308 {
2309     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2310     struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2311     u32 tmp;
2312 
2313     WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2314            upper_32_bits(amdgpu_crtc->cursor_addr));
2315     WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2316            lower_32_bits(amdgpu_crtc->cursor_addr));
2317 
2318     tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2319     tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2320     tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2321     WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2322 }
2323 
2324 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2325                     int x, int y)
2326 {
2327     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2328     struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2329     int xorigin = 0, yorigin = 0;
2330 
2331     amdgpu_crtc->cursor_x = x;
2332     amdgpu_crtc->cursor_y = y;
2333 
2334     /* avivo cursor are offset into the total surface */
2335     x += crtc->x;
2336     y += crtc->y;
2337     DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2338 
2339     if (x < 0) {
2340         xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2341         x = 0;
2342     }
2343     if (y < 0) {
2344         yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2345         y = 0;
2346     }
2347 
2348     WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2349     WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2350     WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2351            ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2352 
2353     return 0;
2354 }
2355 
2356 static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2357                       int x, int y)
2358 {
2359     int ret;
2360 
2361     dce_v10_0_lock_cursor(crtc, true);
2362     ret = dce_v10_0_cursor_move_locked(crtc, x, y);
2363     dce_v10_0_lock_cursor(crtc, false);
2364 
2365     return ret;
2366 }
2367 
/*
 * dce_v10_0_crtc_cursor_set2 - DRM cursor_set2 hook: set or clear the cursor.
 *
 * handle == 0 turns the cursor off.  Otherwise the GEM object named by
 * @handle is looked up, pinned into VRAM, and programmed as the cursor
 * surface.  On every exit through "unpin" the previously tracked cursor
 * BO (cursor_bo) is unpinned and released, and @obj (which may be NULL)
 * takes its place — so this function owns the lifetime of cursor_bo.
 *
 * Returns 0 on success or a negative errno (lookup/reserve/pin failure).
 */
static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
                      struct drm_file *file_priv,
                      uint32_t handle,
                      uint32_t width,
                      uint32_t height,
                      int32_t hot_x,
                      int32_t hot_y)
{
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
    struct drm_gem_object *obj;
    struct amdgpu_bo *aobj;
    int ret;

    if (!handle) {
        /* turn off cursor */
        dce_v10_0_hide_cursor(crtc);
        obj = NULL;
        goto unpin;
    }

    /* reject dimensions the hardware cursor cannot represent */
    if ((width > amdgpu_crtc->max_cursor_width) ||
        (height > amdgpu_crtc->max_cursor_height)) {
        DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
        return -EINVAL;
    }

    obj = drm_gem_object_lookup(file_priv, handle);
    if (!obj) {
        DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
        return -ENOENT;
    }

    /* pin the new cursor BO into VRAM and record its GPU address */
    aobj = gem_to_amdgpu_bo(obj);
    ret = amdgpu_bo_reserve(aobj, false);
    if (ret != 0) {
        drm_gem_object_put(obj);
        return ret;
    }

    ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
    amdgpu_bo_unreserve(aobj);
    if (ret) {
        DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
        drm_gem_object_put(obj);
        return ret;
    }
    amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);

    dce_v10_0_lock_cursor(crtc, true);

    /* if the geometry or hotspot changed, re-derive the on-screen position
     * so the visible cursor tip stays where it was */
    if (width != amdgpu_crtc->cursor_width ||
        height != amdgpu_crtc->cursor_height ||
        hot_x != amdgpu_crtc->cursor_hot_x ||
        hot_y != amdgpu_crtc->cursor_hot_y) {
        int x, y;

        x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
        y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

        dce_v10_0_cursor_move_locked(crtc, x, y);

        amdgpu_crtc->cursor_width = width;
        amdgpu_crtc->cursor_height = height;
        amdgpu_crtc->cursor_hot_x = hot_x;
        amdgpu_crtc->cursor_hot_y = hot_y;
    }

    dce_v10_0_show_cursor(crtc);
    dce_v10_0_lock_cursor(crtc, false);

unpin:
    /* drop the pin and reference on the previous cursor BO, if any */
    if (amdgpu_crtc->cursor_bo) {
        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
        ret = amdgpu_bo_reserve(aobj, true);
        if (likely(ret == 0)) {
            amdgpu_bo_unpin(aobj);
            amdgpu_bo_unreserve(aobj);
        }
        drm_gem_object_put(amdgpu_crtc->cursor_bo);
    }

    amdgpu_crtc->cursor_bo = obj;
    return 0;
}
2452 
2453 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2454 {
2455     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2456 
2457     if (amdgpu_crtc->cursor_bo) {
2458         dce_v10_0_lock_cursor(crtc, true);
2459 
2460         dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2461                          amdgpu_crtc->cursor_y);
2462 
2463         dce_v10_0_show_cursor(crtc);
2464 
2465         dce_v10_0_lock_cursor(crtc, false);
2466     }
2467 }
2468 
2469 static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2470                     u16 *blue, uint32_t size,
2471                     struct drm_modeset_acquire_ctx *ctx)
2472 {
2473     dce_v10_0_crtc_load_lut(crtc);
2474 
2475     return 0;
2476 }
2477 
/* Unregister the CRTC from DRM, then free the containing amdgpu_crtc. */
static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
{
    struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

    drm_crtc_cleanup(crtc);
    kfree(acrtc);
}
2485 
/* DRM CRTC control hooks for DCE 10.x (legacy/non-atomic modesetting). */
static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
    .cursor_set2 = dce_v10_0_crtc_cursor_set2,
    .cursor_move = dce_v10_0_crtc_cursor_move,
    .gamma_set = dce_v10_0_crtc_gamma_set,
    .set_config = amdgpu_display_crtc_set_config,
    .destroy = dce_v10_0_crtc_destroy,
    .page_flip_target = amdgpu_display_crtc_page_flip_target,
    .get_vblank_counter = amdgpu_get_vblank_counter_kms,
    .enable_vblank = amdgpu_enable_vblank_kms,
    .disable_vblank = amdgpu_disable_vblank_kms,
    .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
2498 
/*
 * dce_v10_0_crtc_dpms - legacy DPMS power handling for one CRTC.
 *
 * ON enables the CRTC, unblanks it, re-arms the vblank/pageflip
 * interrupts and reloads the LUT; STANDBY/SUSPEND/OFF blank and disable
 * it.  PM clocks are recomputed at the end in both directions.
 */
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
    struct drm_device *dev = crtc->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
    unsigned type;

    switch (mode) {
    case DRM_MODE_DPMS_ON:
        amdgpu_crtc->enabled = true;
        amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
        /* NOTE(review): VGA render is toggled on around the (un)blank —
         * presumably required by the atombios sequence; confirm. */
        dce_v10_0_vga_enable(crtc, true);
        amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
        dce_v10_0_vga_enable(crtc, false);
        /* Make sure VBLANK and PFLIP interrupts are still enabled */
        type = amdgpu_display_crtc_idx_to_irq_type(adev,
                        amdgpu_crtc->crtc_id);
        amdgpu_irq_update(adev, &adev->crtc_irq, type);
        amdgpu_irq_update(adev, &adev->pageflip_irq, type);
        drm_crtc_vblank_on(crtc);
        dce_v10_0_crtc_load_lut(crtc);
        break;
    case DRM_MODE_DPMS_STANDBY:
    case DRM_MODE_DPMS_SUSPEND:
    case DRM_MODE_DPMS_OFF:
        drm_crtc_vblank_off(crtc);
        if (amdgpu_crtc->enabled) {
            dce_v10_0_vga_enable(crtc, true);
            amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
            dce_v10_0_vga_enable(crtc, false);
        }
        amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
        amdgpu_crtc->enabled = false;
        break;
    }
    /* adjust pm to dpms */
    amdgpu_dpm_compute_clocks(adev);
}
2537 
/*
 * Helper .prepare hook: quiesce the CRTC before mode programming.
 * Power gating is lifted first, then the CRTC is locked and turned off.
 */
static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
{
    /* disable crtc pair power gating before programming */
    amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
    amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
    dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
2545 
/* Helper .commit hook: power the CRTC back on and release the lock
 * taken in dce_v10_0_crtc_prepare(). */
static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
{
    dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
    amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
2551 
/*
 * Helper .disable hook: fully tear down a CRTC.
 *
 * Turns the CRTC off, unpins the primary framebuffer BO, disables the
 * graphics pipe, re-enables power gating, and finally shuts off the PPLL
 * — unless another enabled CRTC still shares the same PLL.
 */
static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
{
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
    struct drm_device *dev = crtc->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_atom_ss ss;
    int i;

    dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
    if (crtc->primary->fb) {
        int r;
        struct amdgpu_bo *abo;

        /* drop the scanout pin on the primary fb's BO */
        abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
        r = amdgpu_bo_reserve(abo, true);
        if (unlikely(r))
            DRM_ERROR("failed to reserve abo before unpin\n");
        else {
            amdgpu_bo_unpin(abo);
            amdgpu_bo_unreserve(abo);
        }
    }
    /* disable the GRPH */
    dce_v10_0_grph_enable(crtc, false);

    amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

    /* keep the PLL running if any other enabled CRTC still uses it */
    for (i = 0; i < adev->mode_info.num_crtc; i++) {
        if (adev->mode_info.crtcs[i] &&
            adev->mode_info.crtcs[i]->enabled &&
            i != amdgpu_crtc->crtc_id &&
            amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
            /* one other crtc is using this pll don't turn
             * off the pll
             */
            goto done;
        }
    }

    switch (amdgpu_crtc->pll_id) {
    case ATOM_PPLL0:
    case ATOM_PPLL1:
    case ATOM_PPLL2:
        /* disable the ppll */
        amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
                      0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
        break;
    default:
        break;
    }
done:
    /* forget the association; the next mode set re-picks everything */
    amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
    amdgpu_crtc->adjusted_clock = 0;
    amdgpu_crtc->encoder = NULL;
    amdgpu_crtc->connector = NULL;
}
2608 
/*
 * Helper .mode_set hook: program PLL, timing, scanout base, overscan,
 * scaler and cursor for the new mode.  Requires mode_fixup to have run
 * first (it computes adjusted_clock); fails with -EINVAL otherwise.
 */
static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
                  struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode,
                  int x, int y, struct drm_framebuffer *old_fb)
{
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

    if (!amdgpu_crtc->adjusted_clock)
        return -EINVAL;

    amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
    amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
    dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
    amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
    amdgpu_atombios_crtc_scaler_setup(crtc);
    dce_v10_0_cursor_reset(crtc);
    /* update the hw version for dpm */
    amdgpu_crtc->hw_mode = *adjusted_mode;

    return 0;
}
2630 
/*
 * Helper .mode_fixup hook: validate and prepare a mode before mode_set.
 *
 * Caches the encoder/connector on the amdgpu_crtc, lets the display core
 * adjust the mode for scaling, pre-computes the PLL parameters, and picks
 * a PPLL.  Returns false to reject the mode.
 */
static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
                     const struct drm_display_mode *mode,
                     struct drm_display_mode *adjusted_mode)
{
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
    struct drm_device *dev = crtc->dev;
    struct drm_encoder *encoder;

    /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
        if (encoder->crtc == crtc) {
            amdgpu_crtc->encoder = encoder;
            amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
            break;
        }
    }
    if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
        amdgpu_crtc->encoder = NULL;
        amdgpu_crtc->connector = NULL;
        return false;
    }
    if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
        return false;
    if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
        return false;
    /* pick pll */
    amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
    /* if we can't get a PPLL for a non-DP encoder, fail */
    if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
        !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
        return false;

    return true;
}
2665 
/* Helper .mode_set_base hook: reprogram scanout base (non-atomic path). */
static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                  struct drm_framebuffer *old_fb)
{
    return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
2671 
/* Helper .mode_set_base_atomic hook: like set_base but with the
 * atomic flag set (final argument 1) for do_set_base. */
static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
                     struct drm_framebuffer *fb,
                     int x, int y, enum mode_set_atomic state)
{
    return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
}
2678 
/* Legacy CRTC helper callbacks wiring the mode-set sequence above. */
static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
    .dpms = dce_v10_0_crtc_dpms,
    .mode_fixup = dce_v10_0_crtc_mode_fixup,
    .mode_set = dce_v10_0_crtc_mode_set,
    .mode_set_base = dce_v10_0_crtc_set_base,
    .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
    .prepare = dce_v10_0_crtc_prepare,
    .commit = dce_v10_0_crtc_commit,
    .disable = dce_v10_0_crtc_disable,
    .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
2690 
2691 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
2692 {
2693     struct amdgpu_crtc *amdgpu_crtc;
2694 
2695     amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2696                   (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2697     if (amdgpu_crtc == NULL)
2698         return -ENOMEM;
2699 
2700     drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
2701 
2702     drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2703     amdgpu_crtc->crtc_id = index;
2704     adev->mode_info.crtcs[index] = amdgpu_crtc;
2705 
2706     amdgpu_crtc->max_cursor_width = 128;
2707     amdgpu_crtc->max_cursor_height = 128;
2708     adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2709     adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2710 
2711     switch (amdgpu_crtc->crtc_id) {
2712     case 0:
2713     default:
2714         amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2715         break;
2716     case 1:
2717         amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2718         break;
2719     case 2:
2720         amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2721         break;
2722     case 3:
2723         amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2724         break;
2725     case 4:
2726         amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2727         break;
2728     case 5:
2729         amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2730         break;
2731     }
2732 
2733     amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2734     amdgpu_crtc->adjusted_clock = 0;
2735     amdgpu_crtc->encoder = NULL;
2736     amdgpu_crtc->connector = NULL;
2737     drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
2738 
2739     return 0;
2740 }
2741 
/*
 * IP .early_init hook: install register accessors, display/irq function
 * tables, and per-ASIC display topology counts.  Only Fiji and Tonga are
 * supported by this DCE 10 block.
 */
static int dce_v10_0_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
    adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;

    dce_v10_0_set_display_funcs(adev);

    adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);

    switch (adev->asic_type) {
    case CHIP_FIJI:
    case CHIP_TONGA:
        adev->mode_info.num_hpd = 6;
        adev->mode_info.num_dig = 7;
        break;
    default:
        /* FIXME: not supported yet */
        return -EINVAL;
    }

    dce_v10_0_set_irq_funcs(adev);

    return 0;
}
2768 
2769 static int dce_v10_0_sw_init(void *handle)
2770 {
2771     int r, i;
2772     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2773 
2774     for (i = 0; i < adev->mode_info.num_crtc; i++) {
2775         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2776         if (r)
2777             return r;
2778     }
2779 
2780     for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2781         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2782         if (r)
2783             return r;
2784     }
2785 
2786     /* HPD hotplug */
2787     r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2788     if (r)
2789         return r;
2790 
2791     adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2792 
2793     adev_to_drm(adev)->mode_config.async_page_flip = true;
2794 
2795     adev_to_drm(adev)->mode_config.max_width = 16384;
2796     adev_to_drm(adev)->mode_config.max_height = 16384;
2797 
2798     adev_to_drm(adev)->mode_config.preferred_depth = 24;
2799     adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2800 
2801     adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2802 
2803     adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2804 
2805     r = amdgpu_display_modeset_create_props(adev);
2806     if (r)
2807         return r;
2808 
2809     adev_to_drm(adev)->mode_config.max_width = 16384;
2810     adev_to_drm(adev)->mode_config.max_height = 16384;
2811 
2812     /* allocate crtcs */
2813     for (i = 0; i < adev->mode_info.num_crtc; i++) {
2814         r = dce_v10_0_crtc_init(adev, i);
2815         if (r)
2816             return r;
2817     }
2818 
2819     if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2820         amdgpu_display_print_display_setup(adev_to_drm(adev));
2821     else
2822         return -EINVAL;
2823 
2824     /* setup afmt */
2825     r = dce_v10_0_afmt_init(adev);
2826     if (r)
2827         return r;
2828 
2829     r = dce_v10_0_audio_init(adev);
2830     if (r)
2831         return r;
2832 
2833     drm_kms_helper_poll_init(adev_to_drm(adev));
2834 
2835     adev->mode_info.mode_config_initialized = true;
2836     return 0;
2837 }
2838 
2839 static int dce_v10_0_sw_fini(void *handle)
2840 {
2841     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2842 
2843     kfree(adev->mode_info.bios_hardcoded_edid);
2844 
2845     drm_kms_helper_poll_fini(adev_to_drm(adev));
2846 
2847     dce_v10_0_audio_fini(adev);
2848 
2849     dce_v10_0_afmt_fini(adev);
2850 
2851     drm_mode_config_cleanup(adev_to_drm(adev));
2852     adev->mode_info.mode_config_initialized = false;
2853 
2854     return 0;
2855 }
2856 
2857 static int dce_v10_0_hw_init(void *handle)
2858 {
2859     int i;
2860     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2861 
2862     dce_v10_0_init_golden_registers(adev);
2863 
2864     /* disable vga render */
2865     dce_v10_0_set_vga_render_state(adev, false);
2866     /* init dig PHYs, disp eng pll */
2867     amdgpu_atombios_encoder_init_dig(adev);
2868     amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2869 
2870     /* initialize hpd */
2871     dce_v10_0_hpd_init(adev);
2872 
2873     for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2874         dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2875     }
2876 
2877     dce_v10_0_pageflip_interrupt_init(adev);
2878 
2879     return 0;
2880 }
2881 
2882 static int dce_v10_0_hw_fini(void *handle)
2883 {
2884     int i;
2885     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2886 
2887     dce_v10_0_hpd_fini(adev);
2888 
2889     for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2890         dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2891     }
2892 
2893     dce_v10_0_pageflip_interrupt_fini(adev);
2894 
2895     return 0;
2896 }
2897 
/*
 * IP .suspend hook: let the display core save its state, stash the raw
 * backlight register level for resume, then run hw_fini.
 */
static int dce_v10_0_suspend(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int r;

    r = amdgpu_display_suspend_helper(adev);
    if (r)
        return r;

    /* saved level is restored by dce_v10_0_resume() */
    adev->mode_info.bl_level =
        amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

    return dce_v10_0_hw_fini(handle);
}
2912 
2913 static int dce_v10_0_resume(void *handle)
2914 {
2915     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2916     int ret;
2917 
2918     amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2919                                adev->mode_info.bl_level);
2920 
2921     ret = dce_v10_0_hw_init(handle);
2922 
2923     /* turn on the BL */
2924     if (adev->mode_info.bl_encoder) {
2925         u8 bl_level = amdgpu_display_backlight_get_level(adev,
2926                                   adev->mode_info.bl_encoder);
2927         amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2928                             bl_level);
2929     }
2930     if (ret)
2931         return ret;
2932 
2933     return amdgpu_display_resume_helper(adev);
2934 }
2935 
/* IP .is_idle hook: no busy state is tracked for DCE; always idle. */
static bool dce_v10_0_is_idle(void *handle)
{
    return true;
}
2940 
/* IP .wait_for_idle hook: nothing to wait for (see is_idle above). */
static int dce_v10_0_wait_for_idle(void *handle)
{
    return 0;
}
2945 
2946 static bool dce_v10_0_check_soft_reset(void *handle)
2947 {
2948     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2949 
2950     return dce_v10_0_is_display_hung(adev);
2951 }
2952 
/*
 * IP .soft_reset hook: pulse the DC soft-reset bit in SRBM_SOFT_RESET,
 * but only when the display engine is actually hung.  Each write is
 * followed by a readback (likely to post the register write) and a 50us
 * delay to let the block settle.
 */
static int dce_v10_0_soft_reset(void *handle)
{
    u32 srbm_soft_reset = 0, tmp;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (dce_v10_0_is_display_hung(adev))
        srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

    if (srbm_soft_reset) {
        tmp = RREG32(mmSRBM_SOFT_RESET);
        tmp |= srbm_soft_reset;
        dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(mmSRBM_SOFT_RESET, tmp);
        tmp = RREG32(mmSRBM_SOFT_RESET);

        udelay(50);

        /* release the reset bit */
        tmp &= ~srbm_soft_reset;
        WREG32(mmSRBM_SOFT_RESET, tmp);
        tmp = RREG32(mmSRBM_SOFT_RESET);

        /* Wait a little for things to settle down */
        udelay(50);
    }
    return 0;
}
2979 
2980 static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2981                              int crtc,
2982                              enum amdgpu_interrupt_state state)
2983 {
2984     u32 lb_interrupt_mask;
2985 
2986     if (crtc >= adev->mode_info.num_crtc) {
2987         DRM_DEBUG("invalid crtc %d\n", crtc);
2988         return;
2989     }
2990 
2991     switch (state) {
2992     case AMDGPU_IRQ_STATE_DISABLE:
2993         lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2994         lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2995                           VBLANK_INTERRUPT_MASK, 0);
2996         WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2997         break;
2998     case AMDGPU_IRQ_STATE_ENABLE:
2999         lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3000         lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3001                           VBLANK_INTERRUPT_MASK, 1);
3002         WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3003         break;
3004     default:
3005         break;
3006     }
3007 }
3008 
3009 static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3010                             int crtc,
3011                             enum amdgpu_interrupt_state state)
3012 {
3013     u32 lb_interrupt_mask;
3014 
3015     if (crtc >= adev->mode_info.num_crtc) {
3016         DRM_DEBUG("invalid crtc %d\n", crtc);
3017         return;
3018     }
3019 
3020     switch (state) {
3021     case AMDGPU_IRQ_STATE_DISABLE:
3022         lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3023         lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3024                           VLINE_INTERRUPT_MASK, 0);
3025         WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3026         break;
3027     case AMDGPU_IRQ_STATE_ENABLE:
3028         lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3029         lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3030                           VLINE_INTERRUPT_MASK, 1);
3031         WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3032         break;
3033     default:
3034         break;
3035     }
3036 }
3037 
3038 static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
3039                        struct amdgpu_irq_src *source,
3040                        unsigned hpd,
3041                        enum amdgpu_interrupt_state state)
3042 {
3043     u32 tmp;
3044 
3045     if (hpd >= adev->mode_info.num_hpd) {
3046         DRM_DEBUG("invalid hdp %d\n", hpd);
3047         return 0;
3048     }
3049 
3050     switch (state) {
3051     case AMDGPU_IRQ_STATE_DISABLE:
3052         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3053         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3054         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3055         break;
3056     case AMDGPU_IRQ_STATE_ENABLE:
3057         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3058         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3059         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3060         break;
3061     default:
3062         break;
3063     }
3064 
3065     return 0;
3066 }
3067 
/*
 * IRQ source .set hook for CRTC interrupts: dispatch the per-CRTC
 * vblank/vline enable to the matching CRTC index.  Unknown types are
 * silently ignored.
 */
static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
                    struct amdgpu_irq_src *source,
                    unsigned type,
                    enum amdgpu_interrupt_state state)
{
    switch (type) {
    case AMDGPU_CRTC_IRQ_VBLANK1:
        dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
        break;
    case AMDGPU_CRTC_IRQ_VBLANK2:
        dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
        break;
    case AMDGPU_CRTC_IRQ_VBLANK3:
        dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
        break;
    case AMDGPU_CRTC_IRQ_VBLANK4:
        dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
        break;
    case AMDGPU_CRTC_IRQ_VBLANK5:
        dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
        break;
    case AMDGPU_CRTC_IRQ_VBLANK6:
        dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
        break;
    case AMDGPU_CRTC_IRQ_VLINE1:
        dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
        break;
    case AMDGPU_CRTC_IRQ_VLINE2:
        dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
        break;
    case AMDGPU_CRTC_IRQ_VLINE3:
        dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
        break;
    case AMDGPU_CRTC_IRQ_VLINE4:
        dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
        break;
    case AMDGPU_CRTC_IRQ_VLINE5:
        dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
        break;
    case AMDGPU_CRTC_IRQ_VLINE6:
        dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
        break;
    default:
        break;
    }
    return 0;
}
3115 
3116 static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3117                         struct amdgpu_irq_src *src,
3118                         unsigned type,
3119                         enum amdgpu_interrupt_state state)
3120 {
3121     u32 reg;
3122 
3123     if (type >= adev->mode_info.num_crtc) {
3124         DRM_ERROR("invalid pageflip crtc %d\n", type);
3125         return -EINVAL;
3126     }
3127 
3128     reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3129     if (state == AMDGPU_IRQ_STATE_DISABLE)
3130         WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3131                reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3132     else
3133         WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3134                reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3135 
3136     return 0;
3137 }
3138 
3139 static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3140                   struct amdgpu_irq_src *source,
3141                   struct amdgpu_iv_entry *entry)
3142 {
3143     unsigned long flags;
3144     unsigned crtc_id;
3145     struct amdgpu_crtc *amdgpu_crtc;
3146     struct amdgpu_flip_work *works;
3147 
3148     crtc_id = (entry->src_id - 8) >> 1;
3149     amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3150 
3151     if (crtc_id >= adev->mode_info.num_crtc) {
3152         DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3153         return -EINVAL;
3154     }
3155 
3156     if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3157         GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3158         WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3159                GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3160 
3161     /* IRQ could occur when in initial stage */
3162     if (amdgpu_crtc == NULL)
3163         return 0;
3164 
3165     spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3166     works = amdgpu_crtc->pflip_works;
3167     if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3168         DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3169                          "AMDGPU_FLIP_SUBMITTED(%d)\n",
3170                          amdgpu_crtc->pflip_status,
3171                          AMDGPU_FLIP_SUBMITTED);
3172         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3173         return 0;
3174     }
3175 
3176     /* page flip completed. clean up */
3177     amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3178     amdgpu_crtc->pflip_works = NULL;
3179 
3180     /* wakeup usersapce */
3181     if (works->event)
3182         drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3183 
3184     spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3185 
3186     drm_crtc_vblank_put(&amdgpu_crtc->base);
3187     schedule_work(&works->unpin_work);
3188 
3189     return 0;
3190 }
3191 
3192 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
3193                   int hpd)
3194 {
3195     u32 tmp;
3196 
3197     if (hpd >= adev->mode_info.num_hpd) {
3198         DRM_DEBUG("invalid hdp %d\n", hpd);
3199         return;
3200     }
3201 
3202     tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3203     tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3204     WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3205 }
3206 
3207 static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3208                       int crtc)
3209 {
3210     u32 tmp;
3211 
3212     if (crtc >= adev->mode_info.num_crtc) {
3213         DRM_DEBUG("invalid crtc %d\n", crtc);
3214         return;
3215     }
3216 
3217     tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3218     tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3219     WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3220 }
3221 
3222 static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3223                      int crtc)
3224 {
3225     u32 tmp;
3226 
3227     if (crtc >= adev->mode_info.num_crtc) {
3228         DRM_DEBUG("invalid crtc %d\n", crtc);
3229         return;
3230     }
3231 
3232     tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3233     tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3234     WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3235 }
3236 
3237 static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3238                   struct amdgpu_irq_src *source,
3239                   struct amdgpu_iv_entry *entry)
3240 {
3241     unsigned crtc = entry->src_id - 1;
3242     uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3243     unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
3244 
3245     switch (entry->src_data[0]) {
3246     case 0: /* vblank */
3247         if (disp_int & interrupt_status_offsets[crtc].vblank)
3248             dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3249         else
3250             DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3251 
3252         if (amdgpu_irq_enabled(adev, source, irq_type)) {
3253             drm_handle_vblank(adev_to_drm(adev), crtc);
3254         }
3255         DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3256 
3257         break;
3258     case 1: /* vline */
3259         if (disp_int & interrupt_status_offsets[crtc].vline)
3260             dce_v10_0_crtc_vline_int_ack(adev, crtc);
3261         else
3262             DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3263 
3264         DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3265 
3266         break;
3267     default:
3268         DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3269         break;
3270     }
3271 
3272     return 0;
3273 }
3274 
3275 static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
3276                  struct amdgpu_irq_src *source,
3277                  struct amdgpu_iv_entry *entry)
3278 {
3279     uint32_t disp_int, mask;
3280     unsigned hpd;
3281 
3282     if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3283         DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3284         return 0;
3285     }
3286 
3287     hpd = entry->src_data[0];
3288     disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3289     mask = interrupt_status_offsets[hpd].hpd;
3290 
3291     if (disp_int & mask) {
3292         dce_v10_0_hpd_int_ack(adev, hpd);
3293         schedule_work(&adev->hotplug_work);
3294         DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3295     }
3296 
3297     return 0;
3298 }
3299 
/* No-op clockgating hook for the amd_ip_funcs table; always succeeds. */
static int dce_v10_0_set_clockgating_state(void *handle,
                      enum amd_clockgating_state state)
{
    return 0;
}
3305 
/* No-op powergating hook for the amd_ip_funcs table; always succeeds. */
static int dce_v10_0_set_powergating_state(void *handle,
                      enum amd_powergating_state state)
{
    return 0;
}
3311 
/* IP-level lifecycle callbacks for the DCE 10.x display block; exposed to
 * the amdgpu core through the dce_v10_0/dce_v10_1 ip_block descriptors. */
static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
    .name = "dce_v10_0",
    .early_init = dce_v10_0_early_init,
    .late_init = NULL,
    .sw_init = dce_v10_0_sw_init,
    .sw_fini = dce_v10_0_sw_fini,
    .hw_init = dce_v10_0_hw_init,
    .hw_fini = dce_v10_0_hw_fini,
    .suspend = dce_v10_0_suspend,
    .resume = dce_v10_0_resume,
    .is_idle = dce_v10_0_is_idle,
    .wait_for_idle = dce_v10_0_wait_for_idle,
    .check_soft_reset = dce_v10_0_check_soft_reset,
    .soft_reset = dce_v10_0_soft_reset,
    .set_clockgating_state = dce_v10_0_set_clockgating_state,
    .set_powergating_state = dce_v10_0_set_powergating_state,
};
3329 
3330 static void
3331 dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
3332               struct drm_display_mode *mode,
3333               struct drm_display_mode *adjusted_mode)
3334 {
3335     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3336 
3337     amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3338 
3339     /* need to call this here rather than in prepare() since we need some crtc info */
3340     amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3341 
3342     /* set scaler clears this on some chips */
3343     dce_v10_0_set_interleave(encoder->crtc, mode);
3344 
3345     if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3346         dce_v10_0_afmt_enable(encoder, true);
3347         dce_v10_0_afmt_setmode(encoder, adjusted_mode);
3348     }
3349 }
3350 
/* Pre-modeset encoder setup: assigns a DIG encoder (and AFMT block for
 * DFP outputs), locks the atombios scratch registers for the modeset,
 * routes the i2c router and powers up eDP panels, then selects the crtc
 * source and programs the FMT block.  The ordering of these steps is
 * significant (see inline comments).
 */
static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
{
    struct amdgpu_device *adev = drm_to_adev(encoder->dev);
    struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
    struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

    /* digital outputs (DFP/LCD) and DP bridges need a DIG encoder slot */
    if ((amdgpu_encoder->active_device &
         (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
        (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
         ENCODER_OBJECT_ID_NONE)) {
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        if (dig) {
            dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
            /* only DFP outputs get an AFMT (audio/infoframe) block */
            if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
                dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
        }
    }

    /* held until dce_v10_0_encoder_commit() releases it */
    amdgpu_atombios_scratch_regs_lock(adev, true);

    if (connector) {
        struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

        /* select the clock/data port if it uses a router */
        if (amdgpu_connector->router.cd_valid)
            amdgpu_i2c_router_select_cd_port(amdgpu_connector);

        /* turn eDP panel on for mode set */
        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
            amdgpu_atombios_encoder_set_edp_panel_power(connector,
                                 ATOM_TRANSMITTER_ACTION_POWER_ON);
    }

    /* this is needed for the pll/ss setup to work correctly in some cases */
    amdgpu_atombios_encoder_set_crtc_source(encoder);
    /* set up the FMT blocks */
    dce_v10_0_program_fmt(encoder);
}
3389 
3390 static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
3391 {
3392     struct drm_device *dev = encoder->dev;
3393     struct amdgpu_device *adev = drm_to_adev(dev);
3394 
3395     /* need to call this here as we need the crtc set up */
3396     amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3397     amdgpu_atombios_scratch_regs_lock(adev, false);
3398 }
3399 
3400 static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
3401 {
3402     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3403     struct amdgpu_encoder_atom_dig *dig;
3404 
3405     amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3406 
3407     if (amdgpu_atombios_encoder_is_digital(encoder)) {
3408         if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3409             dce_v10_0_afmt_enable(encoder, false);
3410         dig = amdgpu_encoder->enc_priv;
3411         dig->dig_encoder = -1;
3412     }
3413     amdgpu_encoder->active_device = 0;
3414 }
3415 
/* these are handled by the primary encoders */
static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
{
    /* intentionally empty: the primary (internal) encoder does the work */
}
3421 
/* No-op: commit is handled by the primary encoder. */
static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
{
    /* intentionally empty */
}
3426 
/* No-op: mode_set is handled by the primary encoder. */
static void
dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
              struct drm_display_mode *mode,
              struct drm_display_mode *adjusted_mode)
{
    /* intentionally empty */
}
3434 
/* No-op: disable is handled by the primary encoder. */
static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
{
    /* intentionally empty */
}
3439 
/* No-op: dpms is handled by the primary encoder. */
static void
dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
    /* intentionally empty */
}
3445 
/* Helper table for external bridge encoders; every hook is a stub since
 * the primary encoder drives the hardware (see dce_v10_0_encoder_add). */
static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
    .dpms = dce_v10_0_ext_dpms,
    .prepare = dce_v10_0_ext_prepare,
    .mode_set = dce_v10_0_ext_mode_set,
    .commit = dce_v10_0_ext_commit,
    .disable = dce_v10_0_ext_disable,
    /* no detect for TMDS/LVDS yet */
};
3454 
/* Helper table attached to internal digital (UNIPHY/DVO) encoders in
 * dce_v10_0_encoder_add(). */
static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
    .dpms = amdgpu_atombios_encoder_dpms,
    .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
    .prepare = dce_v10_0_encoder_prepare,
    .mode_set = dce_v10_0_encoder_mode_set,
    .commit = dce_v10_0_encoder_commit,
    .disable = dce_v10_0_encoder_disable,
    .detect = amdgpu_atombios_encoder_dig_detect,
};
3464 
/* Helper table attached to internal DAC (analog) encoders in
 * dce_v10_0_encoder_add(); note there is no .disable hook here. */
static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
    .dpms = amdgpu_atombios_encoder_dpms,
    .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
    .prepare = dce_v10_0_encoder_prepare,
    .mode_set = dce_v10_0_encoder_mode_set,
    .commit = dce_v10_0_encoder_commit,
    .detect = amdgpu_atombios_encoder_dac_detect,
};
3473 
3474 static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
3475 {
3476     struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3477     if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3478         amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3479     kfree(amdgpu_encoder->enc_priv);
3480     drm_encoder_cleanup(encoder);
3481     kfree(amdgpu_encoder);
3482 }
3483 
/* Base encoder funcs; destroy also frees the amdgpu_encoder wrapper. */
static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
    .destroy = dce_v10_0_encoder_destroy,
};
3487 
/* Register (or extend) an encoder parsed from the atombios object tables.
 * If an encoder with the same encoder_enum already exists, the supported
 * device bits are merged into it; otherwise a new amdgpu_encoder is
 * allocated, its possible_crtcs mask derived from num_crtc, and the DRM
 * encoder type plus helper funcs chosen from the atombios encoder id.
 */
static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
                 uint32_t encoder_enum,
                 uint32_t supported_device,
                 u16 caps)
{
    struct drm_device *dev = adev_to_drm(adev);
    struct drm_encoder *encoder;
    struct amdgpu_encoder *amdgpu_encoder;

    /* see if we already added it */
    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
        amdgpu_encoder = to_amdgpu_encoder(encoder);
        if (amdgpu_encoder->encoder_enum == encoder_enum) {
            /* already known: just record the extra device support */
            amdgpu_encoder->devices |= supported_device;
            return;
        }

    }

    /* add a new one */
    amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
    if (!amdgpu_encoder)
        return;

    encoder = &amdgpu_encoder->base;
    /* one bit per crtc; unknown counts fall back to the 2-crtc mask */
    switch (adev->mode_info.num_crtc) {
    case 1:
        encoder->possible_crtcs = 0x1;
        break;
    case 2:
    default:
        encoder->possible_crtcs = 0x3;
        break;
    case 4:
        encoder->possible_crtcs = 0xf;
        break;
    case 6:
        encoder->possible_crtcs = 0x3f;
        break;
    }

    amdgpu_encoder->enc_priv = NULL;

    amdgpu_encoder->encoder_enum = encoder_enum;
    amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
    amdgpu_encoder->devices = supported_device;
    amdgpu_encoder->rmx_type = RMX_OFF;
    amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
    amdgpu_encoder->is_ext_encoder = false;
    amdgpu_encoder->caps = caps;

    switch (amdgpu_encoder->encoder_id) {
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
        /* internal analog DACs */
        drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
                 DRM_MODE_ENCODER_DAC, NULL);
        drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
        break;
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
        /* internal digital encoders: pick the DRM type and enc_priv
         * from the device support bits (LCD > CRT > TMDS) */
        if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
            amdgpu_encoder->rmx_type = RMX_FULL;
            drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
                     DRM_MODE_ENCODER_LVDS, NULL);
            amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
        } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
            drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
                     DRM_MODE_ENCODER_DAC, NULL);
            amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
        } else {
            drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
                     DRM_MODE_ENCODER_TMDS, NULL);
            amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
        }
        drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
        break;
    case ENCODER_OBJECT_ID_SI170B:
    case ENCODER_OBJECT_ID_CH7303:
    case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
    case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
    case ENCODER_OBJECT_ID_TITFP513:
    case ENCODER_OBJECT_ID_VT1623:
    case ENCODER_OBJECT_ID_HDMI_SI1930:
    case ENCODER_OBJECT_ID_TRAVIS:
    case ENCODER_OBJECT_ID_NUTMEG:
        /* these are handled by the primary encoders */
        amdgpu_encoder->is_ext_encoder = true;
        if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
            drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
                     DRM_MODE_ENCODER_LVDS, NULL);
        else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
            drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
                     DRM_MODE_ENCODER_DAC, NULL);
        else
            drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
                     DRM_MODE_ENCODER_TMDS, NULL);
        drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
        break;
    }
}
3591 
/* Display hooks consumed by the shared amdgpu display code; installed on
 * adev->mode_info.funcs by dce_v10_0_set_display_funcs(). */
static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
    .bandwidth_update = &dce_v10_0_bandwidth_update,
    .vblank_get_counter = &dce_v10_0_vblank_get_counter,
    .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
    .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
    .hpd_sense = &dce_v10_0_hpd_sense,
    .hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
    .hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
    .page_flip = &dce_v10_0_page_flip,
    .page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
    .add_encoder = &dce_v10_0_encoder_add,
    .add_connector = &amdgpu_connector_add,
};
3605 
/* Install the DCE 10.x display function table on the device. */
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
{
    adev->mode_info.funcs = &dce_v10_0_display_funcs;
}
3610 
/* IRQ source for crtc vblank/vline interrupts. */
static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
    .set = dce_v10_0_set_crtc_irq_state,
    .process = dce_v10_0_crtc_irq,
};
3615 
/* IRQ source for page-flip completion interrupts. */
static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
    .set = dce_v10_0_set_pageflip_irq_state,
    .process = dce_v10_0_pageflip_irq,
};
3620 
/* IRQ source for hot-plug-detect interrupts. */
static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
    .set = dce_v10_0_set_hpd_irq_state,
    .process = dce_v10_0_hpd_irq,
};
3625 
3626 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
3627 {
3628     if (adev->mode_info.num_crtc > 0)
3629         adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3630     else
3631         adev->crtc_irq.num_types = 0;
3632     adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
3633 
3634     adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3635     adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
3636 
3637     adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3638     adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
3639 }
3640 
/* DCE 10.0 IP block descriptor exported to the amdgpu IP framework. */
const struct amdgpu_ip_block_version dce_v10_0_ip_block =
{
    .type = AMD_IP_BLOCK_TYPE_DCE,
    .major = 10,
    .minor = 0,
    .rev = 0,
    .funcs = &dce_v10_0_ip_funcs,
};
3649 
/* DCE 10.1 IP block descriptor; shares the 10.0 callbacks. */
const struct amdgpu_ip_block_version dce_v10_1_ip_block =
{
    .type = AMD_IP_BLOCK_TYPE_DCE,
    .major = 10,
    .minor = 1,
    .rev = 0,
    .funcs = &dce_v10_0_ip_funcs,
};