0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * i.MX8 NWL MIPI DSI host driver
0004  *
0005  * Copyright (C) 2017 NXP
0006  * Copyright (C) 2020 Purism SPC
0007  */
0008 
0009 #include <linux/bitfield.h>
0010 #include <linux/bits.h>
0011 #include <linux/clk.h>
0012 #include <linux/irq.h>
0013 #include <linux/math64.h>
0014 #include <linux/mfd/syscon.h>
0015 #include <linux/media-bus-format.h>
0016 #include <linux/module.h>
0017 #include <linux/mux/consumer.h>
0018 #include <linux/of.h>
0019 #include <linux/of_platform.h>
0020 #include <linux/phy/phy.h>
0021 #include <linux/regmap.h>
0022 #include <linux/reset.h>
0023 #include <linux/sys_soc.h>
0024 #include <linux/time64.h>
0025 
0026 #include <drm/drm_atomic_state_helper.h>
0027 #include <drm/drm_bridge.h>
0028 #include <drm/drm_mipi_dsi.h>
0029 #include <drm/drm_of.h>
0030 #include <drm/drm_print.h>
0031 
0032 #include <video/mipi_display.h>
0033 
0034 #include "nwl-dsi.h"
0035 
0036 #define DRV_NAME "nwl-dsi"
0037 
0038 /* i.MX8 NWL quirks */
0039 /* i.MX8MQ errata E11418 */
0040 #define E11418_HS_MODE_QUIRK    BIT(0)
0041 
0042 #define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)
0043 
0044 enum transfer_direction {
0045     DSI_PACKET_SEND,
0046     DSI_PACKET_RECEIVE,
0047 };
0048 
0049 #define NWL_DSI_ENDPOINT_LCDIF 0
0050 #define NWL_DSI_ENDPOINT_DCSS 1
0051 
0052 struct nwl_dsi_transfer {
0053     const struct mipi_dsi_msg *msg;
0054     struct mipi_dsi_packet packet;
0055     struct completion completed;
0056 
0057     int status; /* status of transmission */
0058     enum transfer_direction direction;
0059     bool need_bta;
0060     u8 cmd;
0061     u16 rx_word_count;
0062     size_t tx_len; /* in bytes */
0063     size_t rx_len; /* in bytes */
0064 };
0065 
0066 struct nwl_dsi {
0067     struct drm_bridge bridge;
0068     struct mipi_dsi_host dsi_host;
0069     struct device *dev;
0070     struct phy *phy;
0071     union phy_configure_opts phy_cfg;
0072     unsigned int quirks;
0073 
0074     struct regmap *regmap;
0075     int irq;
0076     /*
0077      * The DSI host controller needs this reset sequence according to NWL:
0078      * 1. Deassert pclk reset to get access to DSI regs
0079      * 2. Configure DSI Host and DPHY and enable DPHY
0080      * 3. Deassert ESC and BYTE resets to allow host TX operations
0081      * 4. Send DSI cmds to configure peripheral (handled by panel drv)
0082      * 5. Deassert DPI reset so DPI receives pixels and starts sending
0083      *    DSI data
0084      *
0085      * TODO: Since panel_bridges do their DSI setup in enable we
0086      * currently have 4. and 5. swapped.
0087      */
0088     struct reset_control *rst_byte;
0089     struct reset_control *rst_esc;
0090     struct reset_control *rst_dpi;
0091     struct reset_control *rst_pclk;
0092     struct mux_control *mux;
0093 
0094     /* DSI clocks */
0095     struct clk *phy_ref_clk;
0096     struct clk *rx_esc_clk;
0097     struct clk *tx_esc_clk;
0098     struct clk *core_clk;
0099     /*
0100      * hardware bug: the i.MX8MQ needs this clock on during reset
0101      * even when not using LCDIF.
0102      */
0103     struct clk *lcdif_clk;
0104 
0105     /* dsi lanes */
0106     u32 lanes;
0107     enum mipi_dsi_pixel_format format;
0108     struct drm_display_mode mode;
0109     unsigned long dsi_mode_flags;
0110     int error;
0111 
0112     struct nwl_dsi_transfer *xfer;
0113 };
0114 
0115 static const struct regmap_config nwl_dsi_regmap_config = {
0116     .reg_bits = 16,
0117     .val_bits = 32,
0118     .reg_stride = 4,
0119     .max_register = NWL_DSI_IRQ_MASK2,
0120     .name = DRV_NAME,
0121 };
0122 
0123 static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge)
0124 {
0125     return container_of(bridge, struct nwl_dsi, bridge);
0126 }
0127 
0128 static int nwl_dsi_clear_error(struct nwl_dsi *dsi)
0129 {
0130     int ret = dsi->error;
0131 
0132     dsi->error = 0;
0133     return ret;
0134 }
0135 
0136 static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
0137 {
0138     int ret;
0139 
0140     if (dsi->error)
0141         return;
0142 
0143     ret = regmap_write(dsi->regmap, reg, val);
0144     if (ret < 0) {
0145         DRM_DEV_ERROR(dsi->dev,
0146                   "Failed to write NWL DSI reg 0x%x: %d\n", reg,
0147                   ret);
0148         dsi->error = ret;
0149     }
0150 }
0151 
0152 static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
0153 {
0154     unsigned int val;
0155     int ret;
0156 
0157     if (dsi->error)
0158         return 0;
0159 
0160     ret = regmap_read(dsi->regmap, reg, &val);
0161     if (ret < 0) {
0162         DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
0163                   reg, ret);
0164         dsi->error = ret;
0165     }
0166     return val;
0167 }
0168 
0169 static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
0170 {
0171     switch (format) {
0172     case MIPI_DSI_FMT_RGB565:
0173         return NWL_DSI_PIXEL_FORMAT_16;
0174     case MIPI_DSI_FMT_RGB666:
0175         return NWL_DSI_PIXEL_FORMAT_18L;
0176     case MIPI_DSI_FMT_RGB666_PACKED:
0177         return NWL_DSI_PIXEL_FORMAT_18;
0178     case MIPI_DSI_FMT_RGB888:
0179         return NWL_DSI_PIXEL_FORMAT_24;
0180     default:
0181         return -EINVAL;
0182     }
0183 }
0184 
0185 /*
0186  * ps2bc - Picoseconds to byte clock cycles
0187  */
0188 static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
0189 {
0190     u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
0191 
0192     return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
0193                   dsi->lanes * 8ULL * NSEC_PER_SEC);
0194 }
0195 
0196 /*
0197  * ui2bc - UI time periods to byte clock cycles
0198  */
0199 static u32 ui2bc(unsigned int ui)
0200 {
0201     return DIV_ROUND_UP(ui, BITS_PER_BYTE);
0202 }
0203 
0204 /*
0205  * us2lp - microseconds to LP clock cycles
0206  */
0207 static u32 us2lp(u32 lp_clk_rate, unsigned long us)
0208 {
0209     return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
0210 }
0211 
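/*
 * Program the DSI host from the cached D-PHY configuration: lane count,
 * continuous vs. non-continuous clock (with automatic EoTp insertion),
 * the T_PRE/T_POST/TX_GAP timings in byte-clock cycles and the wakeup
 * time in LP-clock cycles.
 */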
0212 static int nwl_dsi_config_host(struct nwl_dsi *dsi)
0213 {
0214     u32 cycles;
0215     struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;
0216 
0217     if (dsi->lanes < 1 || dsi->lanes > 4)
0218         return -EINVAL;
0219 
0220     DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
0221     nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);
0222 
0223     if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
0224         nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
0225         nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
0226     } else {
0227         nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
0228         nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
0229     }
0230 
0231     /* values in byte clock cycles */
0232     cycles = ui2bc(cfg->clk_pre);
0233     DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
0234     nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
0235     cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
0236     DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
0237     cycles += ui2bc(cfg->clk_pre);
0238     DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
0239     nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
0240     cycles = ps2bc(dsi, cfg->hs_exit);
0241     DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
0242     nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);
0243 
0244     nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
0245     nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
0246     nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
0247     nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
0248     /* In LP clock cycles */
0249     cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
0250     DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
0251     nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);
0252 
0253     return nwl_dsi_clear_error(dsi);
0254 }
0255 
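/*
 * Program the DPI (pixel) interface from the current display mode:
 * color coding and pixel format, sync polarities (kept active low, see
 * below), burst vs. non-burst video mode, pixel FIFO send level and the
 * horizontal/vertical blanking registers.
 */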
0256 static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
0257 {
0258     u32 mode;
0259     int color_format;
0260     bool burst_mode;
0261     int hfront_porch, hback_porch, vfront_porch, vback_porch;
0262     int hsync_len, vsync_len;
0263 
0264     hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
0265     hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
0266     hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;
0267 
0268     vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
0269     vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
0270     vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;
0271 
0272     DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
0273     DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
0274     DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
0275     DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
0276     DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
0277     DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
0278     DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
0279     DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
0280     DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);
0281 
0282     color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
0283     if (color_format < 0) {
0284         DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
0285                   dsi->format);
0286         return color_format;
0287     }
0288     DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);
0289 
0290     nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
0291     nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
0292     /*
0293      * Adjusting input polarity based on the video mode results in
0294      * a black screen so always pick active low:
0295      */
0296     nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
0297               NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
0298     nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
0299               NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
0300 
0301     burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
0302              !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);
0303 
0304     if (burst_mode) {
0305         nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
0306         nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
0307     } else {
0308         mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
0309                 NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
0310                 NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
0311         nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
0312         nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
0313                   dsi->mode.hdisplay);
0314     }
0315 
0316     nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
0317     nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
0318     nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);
0319 
0320     nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
0321     nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
0322     nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
0323     nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);
0324 
0325     nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
0326     nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
0327     nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
0328     nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);
0329 
0330     return nwl_dsi_clear_error(dsi);
0331 }
0332 
0333 static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
0334 {
0335     u32 irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
0336                 NWL_DSI_RX_PKT_HDR_RCVD_MASK |
0337                 NWL_DSI_TX_FIFO_OVFLW_MASK |
0338                 NWL_DSI_HS_TX_TIMEOUT_MASK);
0339 
0340     nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
0341     nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);
0342 
0343     return nwl_dsi_clear_error(dsi);
0344 }
0345 
0346 static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
0347                    struct mipi_dsi_device *device)
0348 {
0349     struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
0350     struct device *dev = dsi->dev;
0351 
0352     DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
0353              device->format, device->mode_flags);
0354 
0355     if (device->lanes < 1 || device->lanes > 4)
0356         return -EINVAL;
0357 
0358     dsi->lanes = device->lanes;
0359     dsi->format = device->format;
0360     dsi->dsi_mode_flags = device->mode_flags;
0361 
0362     return 0;
0363 }
0364 
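/*
 * Called from the IRQ path to drain an incoming packet. The RX header is
 * parsed first: short read responses and acknowledge/error reports
 * complete the transfer immediately, long responses record the expected
 * word count. The payload FIFO is then read out in 32-bit words once the
 * payload-received status bit is set. Returns true when the transfer is
 * finished and can be completed.
 */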
0365 static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
0366 {
0367     struct device *dev = dsi->dev;
0368     struct nwl_dsi_transfer *xfer = dsi->xfer;
0369     int err;
0370     u8 *payload = xfer->msg->rx_buf;
0371     u32 val;
0372     u16 word_count;
0373     u8 channel;
0374     u8 data_type;
0375 
0376     xfer->status = 0;
0377 
0378     if (xfer->rx_word_count == 0) {
0379         if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
0380             return false;
0381         /* Get the RX header and parse it */
0382         val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
0383         err = nwl_dsi_clear_error(dsi);
0384         if (err)
0385             xfer->status = err;
0386         word_count = NWL_DSI_WC(val);
0387         channel = NWL_DSI_RX_VC(val);
0388         data_type = NWL_DSI_RX_DT(val);
0389 
0390         if (channel != xfer->msg->channel) {
0391             DRM_DEV_ERROR(dev,
0392                       "[%02X] Channel mismatch (%u != %u)\n",
0393                       xfer->cmd, channel, xfer->msg->channel);
0394             xfer->status = -EINVAL;
0395             return true;
0396         }
0397 
0398         switch (data_type) {
0399         case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
0400         case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
0401             if (xfer->msg->rx_len > 1) {
0402                 /* read second byte */
0403                 payload[1] = word_count >> 8;
0404                 ++xfer->rx_len;
0405             }
0406             fallthrough;
0407         case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
0408         case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
0409             if (xfer->msg->rx_len > 0) {
0410                 /* read first byte */
0411                 payload[0] = word_count & 0xff;
0412                 ++xfer->rx_len;
0413             }
0414             xfer->status = xfer->rx_len;
0415             return true;
0416         case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
0417             word_count &= 0xff;
0418             DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
0419                       xfer->cmd, word_count);
0420             xfer->status = -EPROTO;
0421             return true;
0422         }
0423 
0424         if (word_count > xfer->msg->rx_len) {
0425             DRM_DEV_ERROR(dev,
0426                 "[%02X] Receive buffer too small: %zu (< %u)\n",
0427                 xfer->cmd, xfer->msg->rx_len, word_count);
0428             xfer->status = -EINVAL;
0429             return true;
0430         }
0431 
0432         xfer->rx_word_count = word_count;
0433     } else {
0434         /* Set word_count from previous header read */
0435         word_count = xfer->rx_word_count;
0436     }
0437 
0438     /* If RX payload is not yet received, wait for it */
0439     if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
0440         return false;
0441 
0442     /* Read the RX payload */
0443     while (word_count >= 4) {
0444         val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
0445         payload[0] = (val >> 0) & 0xff;
0446         payload[1] = (val >> 8) & 0xff;
0447         payload[2] = (val >> 16) & 0xff;
0448         payload[3] = (val >> 24) & 0xff;
0449         payload += 4;
0450         xfer->rx_len += 4;
0451         word_count -= 4;
0452     }
0453 
0454     if (word_count > 0) {
0455         val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
0456         switch (word_count) {
0457         case 3:
0458             payload[2] = (val >> 16) & 0xff;
0459             ++xfer->rx_len;
0460             fallthrough;
0461         case 2:
0462             payload[1] = (val >> 8) & 0xff;
0463             ++xfer->rx_len;
0464             fallthrough;
0465         case 1:
0466             payload[0] = (val >> 0) & 0xff;
0467             ++xfer->rx_len;
0468             break;
0469         }
0470     }
0471 
0472     xfer->status = xfer->rx_len;
0473     err = nwl_dsi_clear_error(dsi);
0474     if (err)
0475         xfer->status = err;
0476 
0477     return true;
0478 }
0479 
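/*
 * Completion handling for the ongoing transfer, called from the IRQ
 * handler: a TX_PKT_DONE finishes a send, while the RX status bits (with
 * the D-PHY direction reversed) are handed to nwl_dsi_read_packet(). The
 * waiter in nwl_dsi_host_transfer() is woken once the packet is done.
 */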
0480 static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
0481 {
0482     struct nwl_dsi_transfer *xfer = dsi->xfer;
0483     bool end_packet = false;
0484 
0485     if (!xfer)
0486         return;
0487 
0488     if (xfer->direction == DSI_PACKET_SEND &&
0489         status & NWL_DSI_TX_PKT_DONE) {
0490         xfer->status = xfer->tx_len;
0491         end_packet = true;
0492     } else if (status & NWL_DSI_DPHY_DIRECTION &&
0493            ((status & (NWL_DSI_RX_PKT_HDR_RCVD |
0494                    NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
0495         end_packet = nwl_dsi_read_packet(dsi, status);
0496     }
0497 
0498     if (end_packet)
0499         complete(&xfer->completed);
0500 }
0501 
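/*
 * Queue a packet for transmission: the payload is packed into 32-bit
 * writes to the TX FIFO, then the header (word count, virtual channel,
 * data type, HS/LP selection and BTA request) is written to PKT_CONTROL
 * and the send is triggered. On affected i.MX8MQ revisions the E11418
 * quirk forces HS mode for payloads that would otherwise hit the erratum.
 */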
0502 static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
0503 {
0504     struct nwl_dsi_transfer *xfer = dsi->xfer;
0505     struct mipi_dsi_packet *pkt = &xfer->packet;
0506     const u8 *payload;
0507     size_t length;
0508     u16 word_count;
0509     u8 hs_mode;
0510     u32 val;
0511     u32 hs_workaround = 0;
0512 
0513     /* Send the payload, if any */
0514     length = pkt->payload_length;
0515     payload = pkt->payload;
0516 
0517     while (length >= 4) {
0518         val = *(u32 *)payload;
0519         hs_workaround |= !(val & 0xFFFF00);
0520         nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
0521         payload += 4;
0522         length -= 4;
0523     }
0524     /* Send the rest of the payload */
0525     val = 0;
0526     switch (length) {
0527     case 3:
0528         val |= payload[2] << 16;
0529         fallthrough;
0530     case 2:
0531         val |= payload[1] << 8;
0532         hs_workaround |= !(val & 0xFFFF00);
0533         fallthrough;
0534     case 1:
0535         val |= payload[0];
0536         nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
0537         break;
0538     }
0539     xfer->tx_len = pkt->payload_length;
0540 
0541     /*
0542      * Send the header
0543      * header[0] = Virtual Channel + Data Type
0544      * header[1] = Word Count LSB (LP) or first param (SP)
0545      * header[2] = Word Count MSB (LP) or second param (SP)
0546      */
0547     word_count = pkt->header[1] | (pkt->header[2] << 8);
0548     if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
0549         DRM_DEV_DEBUG_DRIVER(dsi->dev,
0550                      "Using hs mode workaround for cmd 0x%x\n",
0551                      xfer->cmd);
0552         hs_mode = 1;
0553     } else {
0554         hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
0555     }
0556     val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
0557           NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
0558           NWL_DSI_BTA_TX(xfer->need_bta);
0559     nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);
0560 
0561     /* Send packet command */
0562     nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
0563 }
0564 
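/*
 * mipi_dsi_host_ops.transfer implementation: build the packet, decide
 * between send and receive (reads need a bus turn-around), enable the RX
 * escape clock for the duration of the transfer and wait for the IRQ
 * handler to complete it, with a 500 ms FIFO timeout.
 */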
0565 static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
0566                      const struct mipi_dsi_msg *msg)
0567 {
0568     struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
0569     struct nwl_dsi_transfer xfer;
0570     ssize_t ret = 0;
0571 
0572     /* Create packet to be sent */
0573     dsi->xfer = &xfer;
0574     ret = mipi_dsi_create_packet(&xfer.packet, msg);
0575     if (ret < 0) {
0576         dsi->xfer = NULL;
0577         return ret;
0578     }
0579 
0580     if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
0581          msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
0582          msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
0583          msg->type & MIPI_DSI_DCS_READ) &&
0584         msg->rx_len > 0 && msg->rx_buf)
0585         xfer.direction = DSI_PACKET_RECEIVE;
0586     else
0587         xfer.direction = DSI_PACKET_SEND;
0588 
0589     xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
0590     xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
0591     xfer.msg = msg;
0592     xfer.status = -ETIMEDOUT;
0593     xfer.rx_word_count = 0;
0594     xfer.rx_len = 0;
0595     xfer.cmd = 0x00;
0596     if (msg->tx_len > 0)
0597         xfer.cmd = ((u8 *)(msg->tx_buf))[0];
0598     init_completion(&xfer.completed);
0599 
0600     ret = clk_prepare_enable(dsi->rx_esc_clk);
0601     if (ret < 0) {
0602         DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
0603                   ret);
0604         return ret;
0605     }
0606     DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
0607                  clk_get_rate(dsi->rx_esc_clk));
0608 
0609     /* Initiate the DSI packet transmission */
0610     nwl_dsi_begin_transmission(dsi);
0611 
0612     if (!wait_for_completion_timeout(&xfer.completed,
0613                      NWL_DSI_MIPI_FIFO_TIMEOUT)) {
0614         DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
0615                   xfer.cmd);
0616         ret = -ETIMEDOUT;
0617     } else {
0618         ret = xfer.status;
0619     }
0620 
0621     clk_disable_unprepare(dsi->rx_esc_clk);
0622 
0623     return ret;
0624 }
0625 
0626 static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
0627     .attach = nwl_dsi_host_attach,
0628     .transfer = nwl_dsi_host_transfer,
0629 };
0630 
0631 static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
0632 {
0633     u32 irq_status;
0634     struct nwl_dsi *dsi = data;
0635 
0636     irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);
0637 
0638     if (irq_status & NWL_DSI_TX_FIFO_OVFLW)
0639         DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");
0640 
0641     if (irq_status & NWL_DSI_HS_TX_TIMEOUT)
0642         DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");
0643 
0644     if (irq_status & NWL_DSI_TX_PKT_DONE ||
0645         irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
0646         irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
0647         nwl_dsi_finish_transmission(dsi, irq_status);
0648 
0649     return IRQ_HANDLED;
0650 }
0651 
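/*
 * Bring up the D-PHY and the host for the mode saved by mode_set:
 * initialize and configure the PHY, enable the TX escape clock, program
 * the host and DPI registers, power on the PHY and unmask the interrupts.
 */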
0652 static int nwl_dsi_mode_set(struct nwl_dsi *dsi)
0653 {
0654     struct device *dev = dsi->dev;
0655     union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
0656     int ret;
0657 
0658     if (!dsi->lanes) {
0659         DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
0660         return -EINVAL;
0661     }
0662 
0663     ret = phy_init(dsi->phy);
0664     if (ret < 0) {
0665         DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
0666         return ret;
0667     }
0668 
0669     ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
0670     if (ret < 0) {
0671         DRM_DEV_ERROR(dev, "Failed to set DSI phy mode: %d\n", ret);
0672         goto uninit_phy;
0673     }
0674 
0675     ret = phy_configure(dsi->phy, phy_cfg);
0676     if (ret < 0) {
0677         DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
0678         goto uninit_phy;
0679     }
0680 
0681     ret = clk_prepare_enable(dsi->tx_esc_clk);
0682     if (ret < 0) {
0683         DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
0684                   ret);
0685         goto uninit_phy;
0686     }
0687     DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
0688                  clk_get_rate(dsi->tx_esc_clk));
0689 
0690     ret = nwl_dsi_config_host(dsi);
0691     if (ret < 0) {
0692         DRM_DEV_ERROR(dev, "Failed to set up DSI: %d\n", ret);
0693         goto disable_clock;
0694     }
0695 
0696     ret = nwl_dsi_config_dpi(dsi);
0697     if (ret < 0) {
0698         DRM_DEV_ERROR(dev, "Failed to set up DPI: %d\n", ret);
0699         goto disable_clock;
0700     }
0701 
0702     ret = phy_power_on(dsi->phy);
0703     if (ret < 0) {
0704         DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
0705         goto disable_clock;
0706     }
0707 
0708     ret = nwl_dsi_init_interrupts(dsi);
0709     if (ret < 0)
0710         goto power_off_phy;
0711 
0712     return ret;
0713 
0714 power_off_phy:
0715     phy_power_off(dsi->phy);
0716 disable_clock:
0717     clk_disable_unprepare(dsi->tx_esc_clk);
0718 uninit_phy:
0719     phy_exit(dsi->phy);
0720 
0721     return ret;
0722 }
0723 
0724 static int nwl_dsi_disable(struct nwl_dsi *dsi)
0725 {
0726     struct device *dev = dsi->dev;
0727 
0728     DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");
0729 
0730     phy_power_off(dsi->phy);
0731     phy_exit(dsi->phy);
0732 
0733     /* Disabling the clock before the phy breaks enabling dsi again */
0734     clk_disable_unprepare(dsi->tx_esc_clk);
0735 
0736     return 0;
0737 }
0738 
0739 static void
0740 nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
0741                   struct drm_bridge_state *old_bridge_state)
0742 {
0743     struct nwl_dsi *dsi = bridge_to_dsi(bridge);
0744     int ret;
0745 
0746     nwl_dsi_disable(dsi);
0747 
0748     ret = reset_control_assert(dsi->rst_dpi);
0749     if (ret < 0) {
0750         DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret);
0751         return;
0752     }
0753     ret = reset_control_assert(dsi->rst_byte);
0754     if (ret < 0) {
0755         DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret);
0756         return;
0757     }
0758     ret = reset_control_assert(dsi->rst_esc);
0759     if (ret < 0) {
0760         DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret);
0761         return;
0762     }
0763     ret = reset_control_assert(dsi->rst_pclk);
0764     if (ret < 0) {
0765         DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret);
0766         return;
0767     }
0768 
0769     clk_disable_unprepare(dsi->core_clk);
0770     clk_disable_unprepare(dsi->lcdif_clk);
0771 
0772     pm_runtime_put(dsi->dev);
0773 }
0774 
0775 static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
0776                    const struct drm_display_mode *mode,
0777                    union phy_configure_opts *phy_opts)
0778 {
0779     unsigned long rate;
0780     int ret;
0781 
0782     if (dsi->lanes < 1 || dsi->lanes > 4)
0783         return -EINVAL;
0784 
0785     /*
0786      * So far the DPHY spec minimal timings work for both mixel
0787      * dphy and nwl dsi host
0788      */
0789     ret = phy_mipi_dphy_get_default_config(mode->clock * 1000,
0790         mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes,
0791         &phy_opts->mipi_dphy);
0792     if (ret < 0)
0793         return ret;
0794 
0795     rate = clk_get_rate(dsi->tx_esc_clk);
0796     DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate);
0797     phy_opts->mipi_dphy.lp_clk_rate = rate;
0798 
0799     return 0;
0800 }
0801 
0802 static enum drm_mode_status
0803 nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
0804               const struct drm_display_info *info,
0805               const struct drm_display_mode *mode)
0806 {
0807     struct nwl_dsi *dsi = bridge_to_dsi(bridge);
0808     int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
0809 
0810     if (mode->clock * bpp > 15000000 * dsi->lanes)
0811         return MODE_CLOCK_HIGH;
0812 
0813     if (mode->clock * bpp < 80000 * dsi->lanes)
0814         return MODE_CLOCK_LOW;
0815 
0816     return MODE_OK;
0817 }
0818 
0819 static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge,
0820                        struct drm_bridge_state *bridge_state,
0821                        struct drm_crtc_state *crtc_state,
0822                        struct drm_connector_state *conn_state)
0823 {
0824     struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
0825 
0826     /* At least LCDIF + NWL needs active high sync */
0827     adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
0828     adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
0829 
0830     /*
0831      * Do a full modeset if crtc_state->active is changed to be true.
0832      * This ensures our ->mode_set() is called to get the DSI controller
0833      * and the PHY ready to send DCS commands, when only the connector's
0834      * DPMS is brought out of "Off" status.
0835      */
0836     if (crtc_state->active_changed && crtc_state->active)
0837         crtc_state->mode_changed = true;
0838 
0839     return 0;
0840 }
0841 
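/*
 * Compute and cache the D-PHY timings for the adjusted mode, then run
 * steps 1-3 of the reset-out sequence documented above: deassert the
 * PCLK reset, configure the host and PHY, and deassert the ESC and BYTE
 * resets. The DPI reset (step 5) is deasserted in atomic_enable.
 */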
0842 static void
0843 nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
0844             const struct drm_display_mode *mode,
0845             const struct drm_display_mode *adjusted_mode)
0846 {
0847     struct nwl_dsi *dsi = bridge_to_dsi(bridge);
0848     struct device *dev = dsi->dev;
0849     union phy_configure_opts new_cfg;
0850     unsigned long phy_ref_rate;
0851     int ret;
0852 
0853     ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg);
0854     if (ret < 0)
0855         return;
0856 
0857     phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
0858     DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);
0859     /* Save the new desired phy config */
0860     memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));
0861 
0862     drm_mode_copy(&dsi->mode, adjusted_mode);
0863     drm_mode_debug_printmodeline(adjusted_mode);
0864 
0865     if (pm_runtime_resume_and_get(dev) < 0)
0866         return;
0867 
0868     if (clk_prepare_enable(dsi->lcdif_clk) < 0)
0869         goto runtime_put;
0870     if (clk_prepare_enable(dsi->core_clk) < 0)
0871         goto runtime_put;
0872 
0873     /* Step 1 from DSI reset-out instructions */
0874     ret = reset_control_deassert(dsi->rst_pclk);
0875     if (ret < 0) {
0876         DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret);
0877         goto runtime_put;
0878     }
0879 
0880     /* Step 2 from DSI reset-out instructions */
0881     nwl_dsi_mode_set(dsi);
0882 
0883     /* Step 3 from DSI reset-out instructions */
0884     ret = reset_control_deassert(dsi->rst_esc);
0885     if (ret < 0) {
0886         DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret);
0887         goto runtime_put;
0888     }
0889     ret = reset_control_deassert(dsi->rst_byte);
0890     if (ret < 0) {
0891         DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret);
0892         goto runtime_put;
0893     }
0894 
0895     return;
0896 
0897 runtime_put:
0898     pm_runtime_put_sync(dev);
0899 }
0900 
0901 static void
0902 nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
0903                  struct drm_bridge_state *old_bridge_state)
0904 {
0905     struct nwl_dsi *dsi = bridge_to_dsi(bridge);
0906     int ret;
0907 
0908     /* Step 5 from DSI reset-out instructions */
0909     ret = reset_control_deassert(dsi->rst_dpi);
0910     if (ret < 0)
0911         DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret);
0912 }
0913 
0914 static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
0915                  enum drm_bridge_attach_flags flags)
0916 {
0917     struct nwl_dsi *dsi = bridge_to_dsi(bridge);
0918     struct drm_bridge *panel_bridge;
0919 
0920     panel_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, 1, 0);
0921     if (IS_ERR(panel_bridge))
0922         return PTR_ERR(panel_bridge);
0923 
0924     return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
0925 }
0926 
0927 static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
0928                          struct drm_bridge_state *bridge_state,
0929                          struct drm_crtc_state *crtc_state,
0930                          struct drm_connector_state *conn_state,
0931                          u32 output_fmt,
0932                          unsigned int *num_input_fmts)
0933 {
0934     u32 *input_fmts, input_fmt;
0935 
0936     *num_input_fmts = 0;
0937 
0938     switch (output_fmt) {
0939     /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
0940     case MEDIA_BUS_FMT_FIXED:
0941         input_fmt = MEDIA_BUS_FMT_RGB888_1X24;
0942         break;
0943     case MEDIA_BUS_FMT_RGB888_1X24:
0944     case MEDIA_BUS_FMT_RGB666_1X18:
0945     case MEDIA_BUS_FMT_RGB565_1X16:
0946         input_fmt = output_fmt;
0947         break;
0948     default:
0949         return NULL;
0950     }
0951 
0952     input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
0953     if (!input_fmts)
0954         return NULL;
0955     input_fmts[0] = input_fmt;
0956     *num_input_fmts = 1;
0957 
0958     return input_fmts;
0959 }
0960 
0961 static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
0962     .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
0963     .atomic_destroy_state   = drm_atomic_helper_bridge_destroy_state,
0964     .atomic_reset       = drm_atomic_helper_bridge_reset,
0965     .atomic_check       = nwl_dsi_bridge_atomic_check,
0966     .atomic_enable      = nwl_dsi_bridge_atomic_enable,
0967     .atomic_disable     = nwl_dsi_bridge_atomic_disable,
0968     .atomic_get_input_bus_fmts = nwl_bridge_atomic_get_input_bus_fmts,
0969     .mode_set       = nwl_dsi_bridge_mode_set,
0970     .mode_valid     = nwl_dsi_bridge_mode_valid,
0971     .attach         = nwl_dsi_bridge_attach,
0972 };
0973 
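/*
 * Look up the resources described in the device tree: the "dphy" PHY,
 * the "lcdif", "core", "phy_ref", "rx_esc" and "tx_esc" clocks, the
 * input mux control, the MMIO register region, the interrupt and the
 * "pclk", "byte", "esc" and "dpi" reset lines.
 */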
0974 static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
0975 {
0976     struct platform_device *pdev = to_platform_device(dsi->dev);
0977     struct clk *clk;
0978     void __iomem *base;
0979     int ret;
0980 
0981     dsi->phy = devm_phy_get(dsi->dev, "dphy");
0982     if (IS_ERR(dsi->phy)) {
0983         ret = PTR_ERR(dsi->phy);
0984         if (ret != -EPROBE_DEFER)
0985             DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret);
0986         return ret;
0987     }
0988 
0989     clk = devm_clk_get(dsi->dev, "lcdif");
0990     if (IS_ERR(clk)) {
0991         ret = PTR_ERR(clk);
0992         DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n",
0993                   ret);
0994         return ret;
0995     }
0996     dsi->lcdif_clk = clk;
0997 
0998     clk = devm_clk_get(dsi->dev, "core");
0999     if (IS_ERR(clk)) {
1000         ret = PTR_ERR(clk);
1001         DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n",
1002                   ret);
1003         return ret;
1004     }
1005     dsi->core_clk = clk;
1006 
1007     clk = devm_clk_get(dsi->dev, "phy_ref");
1008     if (IS_ERR(clk)) {
1009         ret = PTR_ERR(clk);
1010         DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n",
1011                   ret);
1012         return ret;
1013     }
1014     dsi->phy_ref_clk = clk;
1015 
1016     clk = devm_clk_get(dsi->dev, "rx_esc");
1017     if (IS_ERR(clk)) {
1018         ret = PTR_ERR(clk);
1019         DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n",
1020                   ret);
1021         return ret;
1022     }
1023     dsi->rx_esc_clk = clk;
1024 
1025     clk = devm_clk_get(dsi->dev, "tx_esc");
1026     if (IS_ERR(clk)) {
1027         ret = PTR_ERR(clk);
1028         DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n",
1029                   ret);
1030         return ret;
1031     }
1032     dsi->tx_esc_clk = clk;
1033 
1034     dsi->mux = devm_mux_control_get(dsi->dev, NULL);
1035     if (IS_ERR(dsi->mux)) {
1036         ret = PTR_ERR(dsi->mux);
1037         if (ret != -EPROBE_DEFER)
1038             DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret);
1039         return ret;
1040     }
1041 
1042     base = devm_platform_ioremap_resource(pdev, 0);
1043     if (IS_ERR(base))
1044         return PTR_ERR(base);
1045 
1046     dsi->regmap =
1047         devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config);
1048     if (IS_ERR(dsi->regmap)) {
1049         ret = PTR_ERR(dsi->regmap);
1050         DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n",
1051                   ret);
1052         return ret;
1053     }
1054 
1055     dsi->irq = platform_get_irq(pdev, 0);
1056     if (dsi->irq < 0) {
1057         DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n",
1058                   dsi->irq);
1059         return dsi->irq;
1060     }
1061 
1062     dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk");
1063     if (IS_ERR(dsi->rst_pclk)) {
1064         DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n",
1065                   PTR_ERR(dsi->rst_pclk));
1066         return PTR_ERR(dsi->rst_pclk);
1067     }
1068     dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte");
1069     if (IS_ERR(dsi->rst_byte)) {
1070         DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n",
1071                   PTR_ERR(dsi->rst_byte));
1072         return PTR_ERR(dsi->rst_byte);
1073     }
1074     dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc");
1075     if (IS_ERR(dsi->rst_esc)) {
1076         DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n",
1077                   PTR_ERR(dsi->rst_esc));
1078         return PTR_ERR(dsi->rst_esc);
1079     }
1080     dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi");
1081     if (IS_ERR(dsi->rst_dpi)) {
1082         DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n",
1083                   PTR_ERR(dsi->rst_dpi));
1084         return PTR_ERR(dsi->rst_dpi);
1085     }
1086     return 0;
1087 }
1088 
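/*
 * Pick the pixel input source: if port 0 has a remote endpoint at the
 * LCDIF index use LCDIF, otherwise fall back to DCSS, and switch the
 * media mux accordingly.
 */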
1089 static int nwl_dsi_select_input(struct nwl_dsi *dsi)
1090 {
1091     struct device_node *remote;
1092     u32 use_dcss = 1;
1093     int ret;
1094 
1095     remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
1096                       NWL_DSI_ENDPOINT_LCDIF);
1097     if (remote) {
1098         use_dcss = 0;
1099     } else {
1100         remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
1101                           NWL_DSI_ENDPOINT_DCSS);
1102         if (!remote) {
1103             DRM_DEV_ERROR(dsi->dev,
1104                       "No valid input endpoint found\n");
1105             return -EINVAL;
1106         }
1107     }
1108 
1109     DRM_DEV_INFO(dsi->dev, "Using %s as input source\n",
1110              (use_dcss) ? "DCSS" : "LCDIF");
1111     ret = mux_control_try_select(dsi->mux, use_dcss);
1112     if (ret < 0)
1113         DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret);
1114 
1115     of_node_put(remote);
1116     return ret;
1117 }
1118 
1119 static int nwl_dsi_deselect_input(struct nwl_dsi *dsi)
1120 {
1121     int ret;
1122 
1123     ret = mux_control_deselect(dsi->mux);
1124     if (ret < 0)
1125         DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret);
1126 
1127     return ret;
1128 }
1129 
1130 static const struct drm_bridge_timings nwl_dsi_timings = {
1131     .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
1132 };
1133 
1134 static const struct of_device_id nwl_dsi_dt_ids[] = {
1135     { .compatible = "fsl,imx8mq-nwl-dsi", },
1136     { /* sentinel */ }
1137 };
1138 MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
1139 
1140 static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
1141     { .soc_id = "i.MX8MQ", .revision = "2.0",
1142       .data = (void *)E11418_HS_MODE_QUIRK },
1143     { /* sentinel. */ }
1144 };
1145 
1146 static int nwl_dsi_probe(struct platform_device *pdev)
1147 {
1148     struct device *dev = &pdev->dev;
1149     const struct soc_device_attribute *attr;
1150     struct nwl_dsi *dsi;
1151     int ret;
1152 
1153     dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
1154     if (!dsi)
1155         return -ENOMEM;
1156 
1157     dsi->dev = dev;
1158 
1159     ret = nwl_dsi_parse_dt(dsi);
1160     if (ret)
1161         return ret;
1162 
1163     ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0,
1164                    dev_name(dev), dsi);
1165     if (ret < 0) {
1166         DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq,
1167                   ret);
1168         return ret;
1169     }
1170 
1171     dsi->dsi_host.ops = &nwl_dsi_host_ops;
1172     dsi->dsi_host.dev = dev;
1173     ret = mipi_dsi_host_register(&dsi->dsi_host);
1174     if (ret) {
1175         DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
1176         return ret;
1177     }
1178 
1179     attr = soc_device_match(nwl_dsi_quirks_match);
1180     if (attr)
1181         dsi->quirks = (uintptr_t)attr->data;
1182 
1183     dsi->bridge.driver_private = dsi;
1184     dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
1185     dsi->bridge.of_node = dev->of_node;
1186     dsi->bridge.timings = &nwl_dsi_timings;
1187 
1188     dev_set_drvdata(dev, dsi);
1189     pm_runtime_enable(dev);
1190 
1191     ret = nwl_dsi_select_input(dsi);
1192     if (ret < 0) {
1193         pm_runtime_disable(dev);
1194         mipi_dsi_host_unregister(&dsi->dsi_host);
1195         return ret;
1196     }
1197 
1198     drm_bridge_add(&dsi->bridge);
1199     return 0;
1200 }
1201 
1202 static int nwl_dsi_remove(struct platform_device *pdev)
1203 {
1204     struct nwl_dsi *dsi = platform_get_drvdata(pdev);
1205 
1206     nwl_dsi_deselect_input(dsi);
1207     mipi_dsi_host_unregister(&dsi->dsi_host);
1208     drm_bridge_remove(&dsi->bridge);
1209     pm_runtime_disable(&pdev->dev);
1210     return 0;
1211 }
1212 
1213 static struct platform_driver nwl_dsi_driver = {
1214     .probe      = nwl_dsi_probe,
1215     .remove     = nwl_dsi_remove,
1216     .driver     = {
1217         .of_match_table = nwl_dsi_dt_ids,
1218         .name   = DRV_NAME,
1219     },
1220 };
1221 
1222 module_platform_driver(nwl_dsi_driver);
1223 
1224 MODULE_AUTHOR("NXP Semiconductor");
1225 MODULE_AUTHOR("Purism SPC");
1226 MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
1227 MODULE_LICENSE("GPL"); /* GPLv2 or later */