// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_pps.h"
#include "intel_tc.h"

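/* Pack up to four payload bytes, MSB first, into a 32-bit AUX data register value. */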
static u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
{
    int i;
    u32 v = 0;

    if (src_bytes > 4)
        src_bytes = 4;
    for (i = 0; i < src_bytes; i++)
        v |= ((u32)src[i]) << ((3 - i) * 8);
    return v;
}

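/* Unpack a 32-bit AUX data register value back into up to four bytes, MSB first. */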
static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
{
    int i;

    if (dst_bytes > 4)
        dst_bytes = 4;
    for (i = 0; i < dst_bytes; i++)
        dst[i] = src >> ((3 - i) * 8);
}

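/*
 * Wait up to 10 ms for the AUX channel to clear its SEND_BUSY bit and
 * return the final value of the channel control register.
 */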
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
    const unsigned int timeout_ms = 10;
    u32 status;
    bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
    done = wait_event_timeout(i915->gmbus_wait_queue, C,
                  msecs_to_jiffies_timeout(timeout_ms));

    /* just trace the final value */
    trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

    if (!done)
        drm_err(&i915->drm,
            "%s: did not complete or timeout within %ums (status 0x%08x)\n",
            intel_dp->aux.name, timeout_ms, status);
#undef C

    return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

    if (index)
        return 0;

    /*
     * The clock divider is based off the hrawclk, and would like to run at
     * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
     */
    return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    u32 freq;

    if (index)
        return 0;

    /*
     * The clock divider is based off the cdclk or PCH rawclk, and would
     * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
     * divide by 2000 and use that
     */
    if (dig_port->aux_ch == AUX_CH_A)
        freq = dev_priv->cdclk.hw.cdclk;
    else
        freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
    return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

    if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
        /* Workaround for non-ULT HSW */
        switch (index) {
        case 0: return 63;
        case 1: return 72;
        default: return 0;
        }
    }

    return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
    /*
     * SKL doesn't need us to program the AUX clock divider (Hardware will
     * derive the clock from CDCLK automatically). We still implement the
     * get_aux_clock_divider vfunc to plug into the existing code.
     */
    return index ? 0 : 1;
}

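/*
 * Build the AUX_CH_CTL value used to start a transaction on g4x through
 * Broadwell: busy/done/error status bits, hardware timeout, message size,
 * precharge time and the 2x bit-clock divider.
 */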
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
                int send_bytes,
                u32 aux_clock_divider)
{
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    struct drm_i915_private *dev_priv =
            to_i915(dig_port->base.base.dev);
    u32 timeout;

    /* Max timeout value on G4x-BDW: 1.6ms */
    if (IS_BROADWELL(dev_priv))
        timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
    else
        timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

    return DP_AUX_CH_CTL_SEND_BUSY |
           DP_AUX_CH_CTL_DONE |
           DP_AUX_CH_CTL_INTERRUPT |
           DP_AUX_CH_CTL_TIME_OUT_ERROR |
           timeout |
           DP_AUX_CH_CTL_RECEIVE_ERROR |
           (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
           (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
           (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

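/*
 * Build the AUX_CH_CTL value for SKL and later; the hardware derives the AUX
 * clock from cdclk itself, so no divider is programmed here, and the TBT IO
 * bit is set for Type-C ports in Thunderbolt alt mode.
 */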
static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                int send_bytes,
                u32 unused)
{
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    u32 ret;

    /*
     * Max timeout values:
     * SKL-GLK: 1.6ms
     * ICL+: 4ms
     */
    ret = DP_AUX_CH_CTL_SEND_BUSY |
          DP_AUX_CH_CTL_DONE |
          DP_AUX_CH_CTL_INTERRUPT |
          DP_AUX_CH_CTL_TIME_OUT_ERROR |
          DP_AUX_CH_CTL_TIME_OUT_MAX |
          DP_AUX_CH_CTL_RECEIVE_ERROR |
          (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
          DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
          DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

    if (intel_tc_port_in_tbt_alt_mode(dig_port))
        ret |= DP_AUX_CH_CTL_TBT_IO;

    return ret;
}

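/*
 * Low-level AUX transfer: take the required power/PPS/Type-C references, load
 * the request into the channel data registers, start the transaction and retry
 * on timeouts or receive errors, then unpack any reply bytes into @recv.
 * Returns the number of bytes received or a negative error code.
 */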
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
          const u8 *send, int send_bytes,
          u8 *recv, int recv_size,
          u32 aux_send_ctl_flags)
{
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    struct drm_i915_private *i915 =
            to_i915(dig_port->base.base.dev);
    struct intel_uncore *uncore = &i915->uncore;
    enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
    bool is_tc_port = intel_phy_is_tc(i915, phy);
    i915_reg_t ch_ctl, ch_data[5];
    u32 aux_clock_divider;
    enum intel_display_power_domain aux_domain;
    intel_wakeref_t aux_wakeref;
    intel_wakeref_t pps_wakeref;
    int i, ret, recv_bytes;
    int try, clock = 0;
    u32 status;
    bool vdd;

    ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
    for (i = 0; i < ARRAY_SIZE(ch_data); i++)
        ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

    if (is_tc_port)
        intel_tc_port_lock(dig_port);

    aux_domain = intel_aux_power_domain(dig_port);

    aux_wakeref = intel_display_power_get(i915, aux_domain);
    pps_wakeref = intel_pps_lock(intel_dp);

    /*
     * We will be called with VDD already enabled for dpcd/edid/oui reads.
     * In such cases we want to leave VDD enabled and it's up to upper layers
     * to turn it off. But for eg. i2c-dev access we need to turn it on/off
     * ourselves.
     */
    vdd = intel_pps_vdd_on_unlocked(intel_dp);

    /*
     * dp aux is extremely sensitive to irq latency, hence request the
     * lowest possible wakeup latency and so prevent the cpu from going into
     * deep sleep states.
     */
    cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

    intel_pps_check_power_unlocked(intel_dp);

    /* Try to wait for any previous AUX channel activity */
    for (try = 0; try < 3; try++) {
        status = intel_uncore_read_notrace(uncore, ch_ctl);
        if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
            break;
        msleep(1);
    }
    /* just trace the final value */
    trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

    if (try == 3) {
        const u32 status = intel_uncore_read(uncore, ch_ctl);

        if (status != intel_dp->aux_busy_last_status) {
            drm_WARN(&i915->drm, 1,
                 "%s: not started (status 0x%08x)\n",
                 intel_dp->aux.name, status);
            intel_dp->aux_busy_last_status = status;
        }

        ret = -EBUSY;
        goto out;
    }

    /* Only 5 data registers! */
    if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
        ret = -E2BIG;
        goto out;
    }

    while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
        u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                              send_bytes,
                              aux_clock_divider);

        send_ctl |= aux_send_ctl_flags;

        /* Must try at least 3 times according to DP spec */
        for (try = 0; try < 5; try++) {
            /* Load the send data into the aux channel data registers */
            for (i = 0; i < send_bytes; i += 4)
                intel_uncore_write(uncore,
                           ch_data[i >> 2],
                           intel_dp_aux_pack(send + i,
                                     send_bytes - i));

            /* Send the command and wait for it to complete */
            intel_uncore_write(uncore, ch_ctl, send_ctl);

            status = intel_dp_aux_wait_done(intel_dp);

            /* Clear done status and any errors */
            intel_uncore_write(uncore,
                       ch_ctl,
                       status |
                       DP_AUX_CH_CTL_DONE |
                       DP_AUX_CH_CTL_TIME_OUT_ERROR |
                       DP_AUX_CH_CTL_RECEIVE_ERROR);

            /*
             * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
             *   400us delay required for errors and timeouts
             *   Timeout errors from the HW already meet this
             *   requirement so skip to next iteration
             */
            if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                continue;

            if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                usleep_range(400, 500);
                continue;
            }
            if (status & DP_AUX_CH_CTL_DONE)
                goto done;
        }
    }

    if ((status & DP_AUX_CH_CTL_DONE) == 0) {
        drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
            intel_dp->aux.name, status);
        ret = -EBUSY;
        goto out;
    }

done:
    /*
     * Check for timeout or receive error. Timeouts occur when the sink is
     * not connected.
     */
    if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
        drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
            intel_dp->aux.name, status);
        ret = -EIO;
        goto out;
    }

    /*
     * Timeouts occur when the device isn't connected, so they're "normal"
     * -- don't fill the kernel log with these
     */
    if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
        drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
                intel_dp->aux.name, status);
        ret = -ETIMEDOUT;
        goto out;
    }

    /* Unload any bytes sent back from the other side */
    recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
              DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

    /*
     * By BSpec: "Message sizes of 0 or >20 are not allowed."
     * We have no idea of what happened so we return -EBUSY so the
     * drm layer takes care of the necessary retries.
     */
    if (recv_bytes == 0 || recv_bytes > 20) {
        drm_dbg_kms(&i915->drm,
                "%s: Forbidden recv_bytes = %d on aux transaction\n",
                intel_dp->aux.name, recv_bytes);
        ret = -EBUSY;
        goto out;
    }

    if (recv_bytes > recv_size)
        recv_bytes = recv_size;

    for (i = 0; i < recv_bytes; i += 4)
        intel_dp_aux_unpack(intel_uncore_read(uncore, ch_data[i >> 2]),
                    recv + i, recv_bytes - i);

    ret = recv_bytes;
out:
    cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

    if (vdd)
        intel_pps_vdd_off_unlocked(intel_dp, false);

    intel_pps_unlock(intel_dp, pps_wakeref);
    intel_display_power_put_async(i915, aux_domain, aux_wakeref);

    if (is_tc_port)
        intel_tc_port_unlock(dig_port);

    return ret;
}

#define BARE_ADDRESS_SIZE   3
#define HEADER_SIZE     (BARE_ADDRESS_SIZE + 1)

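/*
 * Fill in the 4-byte AUX request header: request/address[19:16],
 * address[15:8], address[7:0] and the payload length minus one.
 */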
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
            const struct drm_dp_aux_msg *msg)
{
    txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
    txbuf[1] = (msg->address >> 8) & 0xff;
    txbuf[2] = msg->address & 0xff;
    txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
    /*
     * If we're trying to send the HDCP Aksv, we need to set the Aksv
     * select bit to inform the hardware to send the Aksv after our header
     * since we can't access that data from software.
     */
    if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
        msg->address == DP_AUX_HDCP_AKSV)
        return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

    return 0;
}

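/*
 * ->transfer() hook for drm_dp_aux: translate a drm_dp_aux_msg into a raw
 * AUX transaction and decode the reply.
 */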
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
    struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    u8 txbuf[20], rxbuf[20];
    size_t txsize, rxsize;
    u32 flags = intel_dp_aux_xfer_flags(msg);
    int ret;

    intel_dp_aux_header(txbuf, msg);

    switch (msg->request & ~DP_AUX_I2C_MOT) {
    case DP_AUX_NATIVE_WRITE:
    case DP_AUX_I2C_WRITE:
    case DP_AUX_I2C_WRITE_STATUS_UPDATE:
        txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
        rxsize = 2; /* 0 or 1 data bytes */

        if (drm_WARN_ON(&i915->drm, txsize > 20))
            return -E2BIG;

        drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

        if (msg->buffer)
            memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

        ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
                    rxbuf, rxsize, flags);
        if (ret > 0) {
            msg->reply = rxbuf[0] >> 4;

            if (ret > 1) {
                /* Number of bytes written in a short write. */
                ret = clamp_t(int, rxbuf[1], 0, msg->size);
            } else {
                /* Return payload size. */
                ret = msg->size;
            }
        }
        break;

    case DP_AUX_NATIVE_READ:
    case DP_AUX_I2C_READ:
        txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
        rxsize = msg->size + 1;

        if (drm_WARN_ON(&i915->drm, rxsize > 20))
            return -E2BIG;

        ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
                    rxbuf, rxsize, flags);
        if (ret > 0) {
            msg->reply = rxbuf[0] >> 4;
            /*
             * Assume happy day, and copy the data. The caller is
             * expected to check msg->reply before touching it.
             *
             * Return payload size.
             */
            ret--;
            memcpy(msg->buffer, rxbuf + 1, ret);
        }
        break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}

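/*
 * Per-platform helpers returning the MMIO control and data registers for the
 * port's AUX channel; unknown channels fall back to a sane default with a
 * MISSING_CASE() warning.
 */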
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    enum aux_ch aux_ch = dig_port->aux_ch;

    switch (aux_ch) {
    case AUX_CH_B:
    case AUX_CH_C:
    case AUX_CH_D:
        return DP_AUX_CH_CTL(aux_ch);
    default:
        MISSING_CASE(aux_ch);
        return DP_AUX_CH_CTL(AUX_CH_B);
    }
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    enum aux_ch aux_ch = dig_port->aux_ch;

    switch (aux_ch) {
    case AUX_CH_B:
    case AUX_CH_C:
    case AUX_CH_D:
        return DP_AUX_CH_DATA(aux_ch, index);
    default:
        MISSING_CASE(aux_ch);
        return DP_AUX_CH_DATA(AUX_CH_B, index);
    }
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    enum aux_ch aux_ch = dig_port->aux_ch;

    switch (aux_ch) {
    case AUX_CH_A:
        return DP_AUX_CH_CTL(aux_ch);
    case AUX_CH_B:
    case AUX_CH_C:
    case AUX_CH_D:
        return PCH_DP_AUX_CH_CTL(aux_ch);
    default:
        MISSING_CASE(aux_ch);
        return DP_AUX_CH_CTL(AUX_CH_A);
    }
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    enum aux_ch aux_ch = dig_port->aux_ch;

    switch (aux_ch) {
    case AUX_CH_A:
        return DP_AUX_CH_DATA(aux_ch, index);
    case AUX_CH_B:
    case AUX_CH_C:
    case AUX_CH_D:
        return PCH_DP_AUX_CH_DATA(aux_ch, index);
    default:
        MISSING_CASE(aux_ch);
        return DP_AUX_CH_DATA(AUX_CH_A, index);
    }
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    enum aux_ch aux_ch = dig_port->aux_ch;

    switch (aux_ch) {
    case AUX_CH_A:
    case AUX_CH_B:
    case AUX_CH_C:
    case AUX_CH_D:
    case AUX_CH_E:
    case AUX_CH_F:
        return DP_AUX_CH_CTL(aux_ch);
    default:
        MISSING_CASE(aux_ch);
        return DP_AUX_CH_CTL(AUX_CH_A);
    }
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    enum aux_ch aux_ch = dig_port->aux_ch;

    switch (aux_ch) {
    case AUX_CH_A:
    case AUX_CH_B:
    case AUX_CH_C:
    case AUX_CH_D:
    case AUX_CH_E:
    case AUX_CH_F:
        return DP_AUX_CH_DATA(aux_ch, index);
    default:
        MISSING_CASE(aux_ch);
        return DP_AUX_CH_DATA(AUX_CH_A, index);
    }
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    enum aux_ch aux_ch = dig_port->aux_ch;

    switch (aux_ch) {
    case AUX_CH_A:
    case AUX_CH_B:
    case AUX_CH_C:
    case AUX_CH_USBC1:
    case AUX_CH_USBC2:
    case AUX_CH_USBC3:
    case AUX_CH_USBC4:
    case AUX_CH_USBC5:  /* aka AUX_CH_D_XELPD */
    case AUX_CH_USBC6:  /* aka AUX_CH_E_XELPD */
        return DP_AUX_CH_CTL(aux_ch);
    default:
        MISSING_CASE(aux_ch);
        return DP_AUX_CH_CTL(AUX_CH_A);
    }
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    enum aux_ch aux_ch = dig_port->aux_ch;

    switch (aux_ch) {
    case AUX_CH_A:
    case AUX_CH_B:
    case AUX_CH_C:
    case AUX_CH_USBC1:
    case AUX_CH_USBC2:
    case AUX_CH_USBC3:
    case AUX_CH_USBC4:
    case AUX_CH_USBC5:  /* aka AUX_CH_D_XELPD */
    case AUX_CH_USBC6:  /* aka AUX_CH_E_XELPD */
        return DP_AUX_CH_DATA(aux_ch, index);
    default:
        MISSING_CASE(aux_ch);
        return DP_AUX_CH_DATA(AUX_CH_A, index);
    }
}

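/* Tear down AUX state: drop the PM QoS request and free the allocated name. */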
void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
    if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
        cpu_latency_qos_remove_request(&intel_dp->pm_qos);

    kfree(intel_dp->aux.name);
}

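/*
 * Set up the AUX channel for this port: pick the per-platform register and
 * control-value helpers, initialise the drm_dp_aux helper, allocate the
 * channel name and register the PM QoS request used during transfers.
 */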
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    struct intel_encoder *encoder = &dig_port->base;
    enum aux_ch aux_ch = dig_port->aux_ch;

    if (DISPLAY_VER(dev_priv) >= 12) {
        intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
        intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
    } else if (DISPLAY_VER(dev_priv) >= 9) {
        intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
        intel_dp->aux_ch_data_reg = skl_aux_data_reg;
    } else if (HAS_PCH_SPLIT(dev_priv)) {
        intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
        intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
    } else {
        intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
        intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
    }

    if (DISPLAY_VER(dev_priv) >= 9)
        intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
    else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
        intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
    else if (HAS_PCH_SPLIT(dev_priv))
        intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
    else
        intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

    if (DISPLAY_VER(dev_priv) >= 9)
        intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
    else
        intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

    intel_dp->aux.drm_dev = &dev_priv->drm;
    drm_dp_aux_init(&intel_dp->aux);

    /* Failure to allocate our preferred name is not critical */
    if (DISPLAY_VER(dev_priv) >= 13 && aux_ch >= AUX_CH_D_XELPD)
        intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
                           aux_ch_name(aux_ch - AUX_CH_D_XELPD + AUX_CH_D),
                           encoder->base.name);
    else if (DISPLAY_VER(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
        intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
                           aux_ch - AUX_CH_USBC1 + '1',
                           encoder->base.name);
    else
        intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
                           aux_ch_name(aux_ch),
                           encoder->base.name);

    intel_dp->aux.transfer = intel_dp_aux_transfer;
    cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}