// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Intel Corporation
//
// Intel IPU3 CIO2 CSI-2 receiver driver.
//

0014 #include <linux/bitops.h>
0015 #include <linux/delay.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/iopoll.h>
0018 #include <linux/mm.h>
0019 #include <linux/module.h>
0020 #include <linux/pci.h>
0021 #include <linux/pfn.h>
0022 #include <linux/pm_runtime.h>
0023 #include <linux/property.h>
0024 #include <linux/vmalloc.h>
0025 #include <media/v4l2-ctrls.h>
0026 #include <media/v4l2-device.h>
0027 #include <media/v4l2-event.h>
0028 #include <media/v4l2-fwnode.h>
0029 #include <media/v4l2-ioctl.h>
0030 #include <media/videobuf2-dma-sg.h>
0031
0032 #include "ipu3-cio2.h"
0033
0034 struct ipu3_cio2_fmt {
0035 u32 mbus_code;
0036 u32 fourcc;
0037 u8 mipicode;
0038 u8 bpp;
0039 };

/*
 * These are raw formats used in Intel's third generation of
 * Image Processing Unit known as IPU3.
 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
 * last LSB 6 bits unused.
 */
0047 static const struct ipu3_cio2_fmt formats[] = {
0048 {
0049 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
0050 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
0051 .mipicode = 0x2b,
0052 .bpp = 10,
0053 }, {
0054 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
0055 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
0056 .mipicode = 0x2b,
0057 .bpp = 10,
0058 }, {
0059 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
0060 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
0061 .mipicode = 0x2b,
0062 .bpp = 10,
0063 }, {
0064 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
0065 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
0066 .mipicode = 0x2b,
0067 .bpp = 10,
0068 }, {
0069 .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
0070 .fourcc = V4L2_PIX_FMT_IPU3_Y10,
0071 .mipicode = 0x2b,
0072 .bpp = 10,
0073 },
0074 };

/*
 * cio2_find_format - lookup color format by fourcc or/and media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
0081 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
0082 const u32 *mbus_code)
0083 {
0084 unsigned int i;
0085
0086 for (i = 0; i < ARRAY_SIZE(formats); i++) {
0087 if (pixelformat && *pixelformat != formats[i].fourcc)
0088 continue;
0089 if (mbus_code && *mbus_code != formats[i].mbus_code)
0090 continue;
0091
0092 return &formats[i];
0093 }
0094
0095 return NULL;
0096 }
0097
0098 static inline u32 cio2_bytesperline(const unsigned int width)
0099 {
        /*
         * 64 bytes for every 50 pixels, the line length
         * in bytes is multiple of 64 (line end alignment).
         */
0104 return DIV_ROUND_UP(width, 50) * 64;
0105 }
0106
/**************** FBPT operations ****************/
0109 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
0110 {
0111 struct device *dev = &cio2->pci_dev->dev;
0112
0113 if (cio2->dummy_lop) {
0114 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
0115 cio2->dummy_lop_bus_addr);
0116 cio2->dummy_lop = NULL;
0117 }
0118 if (cio2->dummy_page) {
0119 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
0120 cio2->dummy_page_bus_addr);
0121 cio2->dummy_page = NULL;
0122 }
0123 }
0124
0125 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
0126 {
0127 struct device *dev = &cio2->pci_dev->dev;
0128 unsigned int i;
0129
0130 cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
0131 &cio2->dummy_page_bus_addr,
0132 GFP_KERNEL);
0133 cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
0134 &cio2->dummy_lop_bus_addr,
0135 GFP_KERNEL);
0136 if (!cio2->dummy_page || !cio2->dummy_lop) {
0137 cio2_fbpt_exit_dummy(cio2);
0138 return -ENOMEM;
0139 }
0140
        /*
         * A dummy List of Pages (LOP): point every entry at the dummy page
         * so that unused FBPT entries always reference valid memory.
         */
0144 for (i = 0; i < CIO2_LOP_ENTRIES; i++)
0145 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
0146
0147 return 0;
0148 }
0149
0150 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
0151 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
0152 {
        /*
         * The CPU first initializes some fields in fbpt, then sets
         * the VALID bit, this barrier is to ensure that the DMA (device)
         * does not see the VALID bit enabled before other fields are
         * initialized; otherwise it could lead to havoc.
         */
0159 dma_wmb();
0160
        /*
         * Request interrupts for start and completion.
         * Valid bit is applicable only to 1st entry
         */
0165 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
0166 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
0167 }
0168
/* Initialize fbpt entries to point to a dummy frame */
0170 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
0171 struct cio2_fbpt_entry
0172 entry[CIO2_MAX_LOPS])
0173 {
0174 unsigned int i;
0175
0176 entry[0].first_entry.first_page_offset = 0;
0177 entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
0178 entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
0179
0180 for (i = 0; i < CIO2_MAX_LOPS; i++)
0181 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
0182
0183 cio2_fbpt_entry_enable(cio2, entry);
0184 }
0185
/* Initialize fbpt entries to point to a given buffer */
0187 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
0188 struct cio2_buffer *b,
0189 struct cio2_fbpt_entry
0190 entry[CIO2_MAX_LOPS])
0191 {
0192 struct vb2_buffer *vb = &b->vbb.vb2_buf;
0193 unsigned int length = vb->planes[0].length;
0194 int remaining, i;
0195
0196 entry[0].first_entry.first_page_offset = b->offset;
0197 remaining = length + entry[0].first_entry.first_page_offset;
0198 entry[1].second_entry.num_of_pages = PFN_UP(remaining);
0199
        /*
         * last_page_available_bytes has the offset of the last byte in the
         * last page which is still accessible by DMA. DMA cannot access
         * beyond this point. Valid range for this is from 0 to 4095.
         * 0 indicates 1st byte in the page is DMA accessible.
         * 4095 (PAGE_SIZE - 1) means every single byte in the last page
         * is available for DMA transfer (including the last byte).
         */
0207 remaining = offset_in_page(remaining) ?: PAGE_SIZE;
0208 entry[1].second_entry.last_page_available_bytes = remaining - 1;
0209
0210 remaining = length;
0211 i = 0;
0212 while (remaining > 0) {
0213 entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
0214 remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
0215 entry++;
0216 i++;
0217 }

        /*
         * The first not meaningful FBPT entry should point to a valid LOP
         */
0222 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
0223
0224 cio2_fbpt_entry_enable(cio2, entry);
0225 }
0226
0227 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
0228 {
0229 struct device *dev = &cio2->pci_dev->dev;
0230
0231 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
0232 GFP_KERNEL);
0233 if (!q->fbpt)
0234 return -ENOMEM;
0235
0236 return 0;
0237 }
0238
0239 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
0240 {
0241 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
0242 }
0243
/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *     UI = 1 / (2 * F) in seconds
 *     COUNT_ACC = counter accuracy in seconds
 *     (0.0625 ns for IPU3, i.e. accinv = 16 in cio2_rx_timing())
 *
 * A and B are per-lane coefficients; cio2_csi2_calc_timing() uses the
 * CIO2_CSIRX_DLY_CNT_*_A/_B macros from ipu3-cio2.h for them.
 */
0279 #define LIMIT_SHIFT 8
0280
0281 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
0282 {
0283 const u32 accinv = 16;
0284 const u32 uiinv = 500000000;
0285 s32 r;
0286
0287 freq >>= LIMIT_SHIFT;
0288
0289 if (WARN_ON(freq <= 0 || freq > S32_MAX))
0290 return def;
0291
        /*
         * freq was scaled down by LIMIT_SHIFT above and uiinv is scaled
         * likewise below, so the intermediate product
         * accinv * b * (uiinv >> LIMIT_SHIFT) fits in an s32;
         * the two shifts cancel out in the division.
         */
0295 r = accinv * b * (uiinv >> LIMIT_SHIFT);
0296 r = r / (s32)freq;
0297
0298 r += accinv * a;
0299
0300 return r;
0301 };
0302
/* Compute the CSI-2 receiver termination-enable and settle delays */
0304 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
0305 struct cio2_csi2_timing *timing,
0306 unsigned int bpp, unsigned int lanes)
0307 {
0308 struct device *dev = &cio2->pci_dev->dev;
0309 s64 freq;
0310
0311 if (!q->sensor)
0312 return -ENODEV;
0313
0314 freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
0315 if (freq < 0) {
0316 dev_err(dev, "error %lld, invalid link_freq\n", freq);
0317 return freq;
0318 }
0319
0320 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
0321 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
0322 freq,
0323 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
0324 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
0325 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
0326 freq,
0327 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
0328 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
0329 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
0330 freq,
0331 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
0332 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
0333 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
0334 freq,
0335 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
0336
0337 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
0338 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
0339 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
0340 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
0341
0342 return 0;
0343 };
0344
0345 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
0346 {
0347 static const int NUM_VCS = 4;
0348 static const int SID;
0349 static const int ENTRY;
0350 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
0351 CIO2_FBPT_SUBENTRY_UNIT);
0352 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
0353 const struct ipu3_cio2_fmt *fmt;
0354 void __iomem *const base = cio2->base;
0355 u8 lanes, csi2bus = q->csi2.port;
0356 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
0357 struct cio2_csi2_timing timing;
0358 int i, r;
0359
0360 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
0361 if (!fmt)
0362 return -EINVAL;
0363
0364 lanes = q->csi2.lanes;
0365
0366 r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
0367 if (r)
0368 return r;
0369
0370 writel(timing.clk_termen, q->csi_rx_base +
0371 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
0372 writel(timing.clk_settle, q->csi_rx_base +
0373 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
0374
0375 for (i = 0; i < lanes; i++) {
0376 writel(timing.dat_termen, q->csi_rx_base +
0377 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
0378 writel(timing.dat_settle, q->csi_rx_base +
0379 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
0380 }
0381
0382 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
0383 CIO2_PBM_WMCTRL1_MID1_2CK |
0384 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
0385 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
0386 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
0387 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
0388 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
0389 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
0390 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
0391 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
0392 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
0393 CIO2_PBM_ARB_CTRL_LE_EN |
0394 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
0395 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
0396 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
0397 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
0398 base + CIO2_REG_PBM_ARB_CTRL);
0399 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
0400 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
0401 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
0402 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
0403
0404 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
0405 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
0406
0407
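        /* Configure MIPI backend: enable a short packet LUT entry per VC */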
0408 for (i = 0; i < NUM_VCS; i++)
0409 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
0410
0411
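        /* Disregard all 16 long packet LUT entries by default */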
0412 for (i = 0; i < 16; i++)
0413 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
0414 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
0415 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
0416 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
0417
0418 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
0419 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
0420 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
0421 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
0422 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
0423 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
0424
0425 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
0426 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
0427 base + CIO2_REG_INT_EN);
0428
0429 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
0430 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
0431 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
0432 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
0433 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
0434 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
0435 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
0436 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
0437 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
0438 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
0439
0440 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
0441 writel(CIO2_CGC_PRIM_TGE |
0442 CIO2_CGC_SIDE_TGE |
0443 CIO2_CGC_XOSC_TGE |
0444 CIO2_CGC_D3I3_TGE |
0445 CIO2_CGC_CSI2_INTERFRAME_TGE |
0446 CIO2_CGC_CSI2_PORT_DCGE |
0447 CIO2_CGC_SIDE_DCGE |
0448 CIO2_CGC_PRIM_DCGE |
0449 CIO2_CGC_ROSC_DCGE |
0450 CIO2_CGC_XOSC_DCGE |
0451 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
0452 CIO2_CGC_CSI_CLKGATE_HOLDOFF
0453 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
0454 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
0455 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
0456 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
0457 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
0458 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
0459 base + CIO2_REG_LTRVAL01);
0460 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
0461 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
0462 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
0463 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
0464 base + CIO2_REG_LTRVAL23);
0465
0466 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
0467 writel(0, base + CIO2_REG_CDMABA(i));
0468 writel(0, base + CIO2_REG_CDMAC0(i));
0469 writel(0, base + CIO2_REG_CDMAC1(i));
0470 }
0471
0472
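        /* Enable DMA: point channel CIO2_DMA_CHAN at this queue's FBPT */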
0473 writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
0474
0475 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
0476 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
0477 CIO2_CDMAC0_DMA_INTR_ON_FE |
0478 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
0479 CIO2_CDMAC0_DMA_EN |
0480 CIO2_CDMAC0_DMA_INTR_ON_FS |
0481 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
0482
0483 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
0484 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
0485
0486 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
0487
0488 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
0489 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
0490 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
0491 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
0492 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
0493
0494
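        /* Clear any pending interrupt status before enabling the hardware */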
0495 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
0496 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
0497 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
0498 writel(~0, base + CIO2_REG_INT_STS);
0499
0500
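        /* Enable devices, starting from the last device in the pipe */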
0501 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
0502 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
0503
0504 return 0;
0505 }
0506
0507 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
0508 {
0509 struct device *dev = &cio2->pci_dev->dev;
0510 void __iomem *const base = cio2->base;
0511 unsigned int i;
0512 u32 value;
0513 int ret;
0514
0515
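        /* Disable CSI receiver and MIPI backend devices */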
0516 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
0517 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
0518 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
0519 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
0520
0521
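        /* Halt DMA and wait for the halted status */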
0522 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
0523 ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
0524 value, value & CIO2_CDMAC0_DMA_HALTED,
0525 4000, 2000000);
0526 if (ret)
0527 dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
0528
0529 for (i = 0; i < CIO2_NUM_PORTS; i++) {
0530 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
0531 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
0532 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
0533 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
0534 }
0535 }
0536
0537 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
0538 {
0539 struct device *dev = &cio2->pci_dev->dev;
0540 struct cio2_queue *q = cio2->cur_queue;
0541 struct cio2_fbpt_entry *entry;
0542 u64 ns = ktime_get_ns();
0543
0544 if (dma_chan >= CIO2_QUEUES) {
0545 dev_err(dev, "bad DMA channel %i\n", dma_chan);
0546 return;
0547 }
0548
0549 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
0550 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
0551 dev_warn(dev, "no ready buffers found on DMA channel %u\n",
0552 dma_chan);
0553 return;
0554 }
0555
0556
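        /* Find out which buffer(s) are ready */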
0557 do {
0558 struct cio2_buffer *b;
0559
0560 b = q->bufs[q->bufs_first];
0561 if (b) {
0562 unsigned int received = entry[1].second_entry.num_of_bytes;
0563 unsigned long payload =
0564 vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
0565
0566 q->bufs[q->bufs_first] = NULL;
0567 atomic_dec(&q->bufs_queued);
0568 dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
0569
0570 b->vbb.vb2_buf.timestamp = ns;
0571 b->vbb.field = V4L2_FIELD_NONE;
0572 b->vbb.sequence = atomic_read(&q->frame_sequence);
0573 if (payload != received)
0574 dev_warn(dev,
0575 "payload length is %lu, received %u\n",
0576 payload, received);
0577 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
0578 }
0579 atomic_inc(&q->frame_sequence);
0580 cio2_fbpt_entry_init_dummy(cio2, entry);
0581 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
0582 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
0583 } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
0584 }
0585
0586 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
0587 {
        /*
         * Frame sync (start of frame) event: report the sequence number of
         * the frame whose reception has just started, so that user space
         * control algorithms can synchronize with the sensor.
         */
0593 struct v4l2_event event = {
0594 .type = V4L2_EVENT_FRAME_SYNC,
0595 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
0596 };
0597
0598 v4l2_event_queue(q->subdev.devnode, &event);
0599 }
0600
0601 static const char *const cio2_irq_errs[] = {
0602 "single packet header error corrected",
0603 "multiple packet header errors detected",
0604 "payload checksum (CRC) error",
0605 "fifo overflow",
0606 "reserved short packet data type detected",
0607 "reserved long packet data type detected",
0608 "incomplete long packet detected",
0609 "frame sync error",
0610 "line sync error",
0611 "DPHY start of transmission error",
0612 "DPHY synchronization error",
0613 "escape mode error",
0614 "escape mode trigger event",
0615 "escape mode ultra-low power state for data lane(s)",
0616 "escape mode ultra-low power state exit for clock lane",
0617 "inter-frame short packet discarded",
0618 "inter-frame long packet discarded",
0619 "non-matching Long Packet stalled",
0620 };
0621
0622 static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
0623 {
0624 unsigned long csi2_status = status;
0625 unsigned int i;
0626
0627 for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
0628 dev_err(dev, "CSI-2 receiver port %i: %s\n",
0629 port, cio2_irq_errs[i]);
0630
0631 if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
0632 dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
0633 csi2_status, port);
0634 }
0635
0636 static const char *const cio2_port_errs[] = {
0637 "ECC recoverable",
0638 "DPHY not recoverable",
0639 "ECC not recoverable",
0640 "CRC error",
0641 "INTERFRAMEDATA",
0642 "PKT2SHORT",
0643 "PKT2LONG",
0644 };
0645
0646 static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
0647 {
0648 unsigned long port_status = status;
0649 unsigned int i;
0650
0651 for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
0652 dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
0653 }
0654
0655 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
0656 {
0657 struct device *dev = &cio2->pci_dev->dev;
0658 void __iomem *const base = cio2->base;
0659
0660 if (int_status & CIO2_INT_IOOE) {
                /*
                 * Interrupt on Output Error: the DMA failed to write
                 * received data out to memory. Per-channel and per-CSI2-bus
                 * status bits are decoded below.
                 */
0666 u32 oe_status, oe_clear;
0667
0668 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
0669 oe_status = oe_clear;
0670
0671 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
0672 dev_err(dev, "DMA output error: 0x%x\n",
0673 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
0674 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
0675 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
0676 }
0677 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
0678 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
0679 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
0680 >> CIO2_INT_EXT_OE_OES_SHIFT);
0681 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
0682 }
0683 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
0684 if (oe_status)
0685 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
0686 oe_status);
0687 int_status &= ~CIO2_INT_IOOE;
0688 }
0689
0690 if (int_status & CIO2_INT_IOC_MASK) {
0691
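                /* DMA IO done -- a frame is ready */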
0692 u32 clr = 0;
0693 unsigned int d;
0694
0695 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
0696 if (int_status & CIO2_INT_IOC(d)) {
0697 clr |= CIO2_INT_IOC(d);
0698 cio2_buffer_done(cio2, d);
0699 }
0700 int_status &= ~clr;
0701 }
0702
0703 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
0704
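                /* DMA IO starts or reached specified line */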
0705 u32 clr = 0;
0706 unsigned int d;
0707
0708 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
0709 if (int_status & CIO2_INT_IOS_IOLN(d)) {
0710 clr |= CIO2_INT_IOS_IOLN(d);
0711 if (d == CIO2_DMA_CHAN)
0712 cio2_queue_event_sof(cio2,
0713 cio2->cur_queue);
0714 }
0715 int_status &= ~clr;
0716 }
0717
0718 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
0719
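                /* CSI2 receiver (error) interrupt */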
0720 unsigned int port;
0721 u32 ie_status;
0722
0723 ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
0724
0725 for (port = 0; port < CIO2_NUM_PORTS; port++) {
0726 u32 port_status = (ie_status >> (port * 8)) & 0xff;
0727
0728 cio2_irq_log_port_errs(dev, port, port_status);
0729
0730 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
0731 void __iomem *csi_rx_base =
0732 base + CIO2_REG_PIPE_BASE(port);
0733 u32 csi2_status;
0734
0735 csi2_status = readl(csi_rx_base +
0736 CIO2_REG_IRQCTRL_STATUS);
0737
0738 cio2_irq_log_irq_errs(dev, port, csi2_status);
0739
0740 writel(csi2_status,
0741 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
0742 }
0743 }
0744
0745 writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
0746
0747 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
0748 }
0749
0750 if (int_status)
0751 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
0752 }
0753
0754 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
0755 {
0756 struct cio2_device *cio2 = cio2_ptr;
0757 void __iomem *const base = cio2->base;
0758 struct device *dev = &cio2->pci_dev->dev;
0759 u32 int_status;
0760
0761 int_status = readl(base + CIO2_REG_INT_STS);
0762 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
0763 if (!int_status)
0764 return IRQ_NONE;
0765
0766 do {
0767 writel(int_status, base + CIO2_REG_INT_STS);
0768 cio2_irq_handle_once(cio2, int_status);
0769 int_status = readl(base + CIO2_REG_INT_STS);
0770 if (int_status)
0771 dev_dbg(dev, "pending status 0x%x\n", int_status);
0772 } while (int_status);
0773
0774 return IRQ_HANDLED;
0775 }
0776
/**************** Videobuf2 interface ****************/
0779 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
0780 enum vb2_buffer_state state)
0781 {
0782 unsigned int i;
0783
0784 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
0785 if (q->bufs[i]) {
0786 atomic_dec(&q->bufs_queued);
0787 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
0788 state);
0789 q->bufs[i] = NULL;
0790 }
0791 }
0792 }
0793
0794 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
0795 unsigned int *num_buffers,
0796 unsigned int *num_planes,
0797 unsigned int sizes[],
0798 struct device *alloc_devs[])
0799 {
0800 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
0801 struct device *dev = &cio2->pci_dev->dev;
0802 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
0803 unsigned int i;
0804
0805 if (*num_planes && *num_planes < q->format.num_planes)
0806 return -EINVAL;
0807
0808 for (i = 0; i < q->format.num_planes; ++i) {
0809 if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
0810 return -EINVAL;
0811 sizes[i] = q->format.plane_fmt[i].sizeimage;
0812 alloc_devs[i] = dev;
0813 }
0814
0815 *num_planes = q->format.num_planes;
0816 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
0817
0818
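        /* Initialize buffer queue: point every FBPT slot at the dummy frame */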
0819 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
0820 q->bufs[i] = NULL;
0821 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
0822 }
0823 atomic_set(&q->bufs_queued, 0);
0824 q->bufs_first = 0;
0825 q->bufs_next = 0;
0826
0827 return 0;
0828 }
0829
0830
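/* Called after each buffer is allocated */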
0831 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
0832 {
0833 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
0834 struct device *dev = &cio2->pci_dev->dev;
0835 struct cio2_buffer *b = to_cio2_buffer(vb);
0836 unsigned int pages = PFN_UP(vb->planes[0].length);
0837 unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
0838 struct sg_table *sg;
0839 struct sg_dma_page_iter sg_iter;
0840 unsigned int i, j;
0841
0842 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
0843 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
0844 vb->planes[0].length);
0845 return -ENOSPC;
0846 }
0847
0848 memset(b->lop, 0, sizeof(b->lop));
0849
0850 for (i = 0; i < lops; i++) {
0851 b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
0852 &b->lop_bus_addr[i], GFP_KERNEL);
0853 if (!b->lop[i])
0854 goto fail;
0855 }
0856
0857
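        /* Fill the LOP tables from the buffer's scatterlist */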
0858 sg = vb2_dma_sg_plane_desc(vb, 0);
0859 if (!sg)
0860 return -ENOMEM;
0861
0862 if (sg->nents && sg->sgl)
0863 b->offset = sg->sgl->offset;
0864
0865 i = j = 0;
0866 for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
0867 if (!pages--)
0868 break;
0869 b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
0870 j++;
0871 if (j == CIO2_LOP_ENTRIES) {
0872 i++;
0873 j = 0;
0874 }
0875 }
0876
0877 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
0878 return 0;
0879 fail:
0880 while (i--)
0881 dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
0882 return -ENOMEM;
0883 }
0884
0885
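/* Transfer buffer ownership to cio2 */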
0886 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
0887 {
0888 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
0889 struct device *dev = &cio2->pci_dev->dev;
0890 struct cio2_queue *q =
0891 container_of(vb->vb2_queue, struct cio2_queue, vbq);
0892 struct cio2_buffer *b = to_cio2_buffer(vb);
0893 struct cio2_fbpt_entry *entry;
0894 unsigned long flags;
0895 unsigned int i, j, next = q->bufs_next;
0896 int bufs_queued = atomic_inc_return(&q->bufs_queued);
0897 u32 fbpt_rp;
0898
0899 dev_dbg(dev, "queue buffer %d\n", vb->index);

        /*
         * This queues the buffer to the CIO2 DMA engine, which keeps running
         * while streaming is active. Interrupts are disabled while the
         * hardware read pointer is sampled and the FBPT entry is filled, so
         * that the slot chosen here stays consistent with the entry the DMA
         * engine is currently working on.
         */
0914 local_irq_save(flags);
0915
0916 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
0917 >> CIO2_CDMARI_FBPT_RP_SHIFT)
0918 & CIO2_CDMARI_FBPT_RP_MASK;
0919
        /*
         * fbpt_rp is the entry the DMA engine is currently working on.
         * It may move on to the next entry at any time, so assume it is
         * already there and start searching from the entry after it.
         */
0925 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
0926
0927 if (bufs_queued <= 1 || fbpt_rp == next)
                /* DMA has caught up; queue right after its read pointer */
0929 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
0930
0931 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
                /*
                 * CIO2_MAX_BUFFERS entries are allocated for the hardware
                 * ring and user space may queue at most CIO2_MAX_BUFFERS
                 * buffers, so a free slot is expected to exist; find it.
                 */
0938 if (!q->bufs[next]) {
0939 q->bufs[next] = b;
0940 entry = &q->fbpt[next * CIO2_MAX_LOPS];
0941 cio2_fbpt_entry_init_buf(cio2, b, entry);
0942 local_irq_restore(flags);
0943 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
0944 for (j = 0; j < vb->num_planes; j++)
0945 vb2_set_plane_payload(vb, j,
0946 q->format.plane_fmt[j].sizeimage);
0947 return;
0948 }
0949
0950 dev_dbg(dev, "entry %i was full!\n", next);
0951 next = (next + 1) % CIO2_MAX_BUFFERS;
0952 }
0953
0954 local_irq_restore(flags);
0955 dev_err(dev, "error: all cio2 entries were full!\n");
0956 atomic_dec(&q->bufs_queued);
0957 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
0958 }
0959
0960
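/* Called when each buffer is freed */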
0961 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
0962 {
0963 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
0964 struct device *dev = &cio2->pci_dev->dev;
0965 struct cio2_buffer *b = to_cio2_buffer(vb);
0966 unsigned int i;
0967
0968
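        /* Free LOP table */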
0969 for (i = 0; i < CIO2_MAX_LOPS; i++) {
0970 if (b->lop[i])
0971 dma_free_coherent(dev, PAGE_SIZE,
0972 b->lop[i], b->lop_bus_addr[i]);
0973 }
0974 }
0975
0976 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
0977 {
0978 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
0979 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
0980 struct device *dev = &cio2->pci_dev->dev;
0981 int r;
0982
0983 cio2->cur_queue = q;
0984 atomic_set(&q->frame_sequence, 0);
0985
0986 r = pm_runtime_resume_and_get(dev);
0987 if (r < 0) {
0988 dev_info(dev, "failed to set power %d\n", r);
0989 return r;
0990 }
0991
0992 r = media_pipeline_start(&q->vdev.entity, &q->pipe);
0993 if (r)
0994 goto fail_pipeline;
0995
0996 r = cio2_hw_init(cio2, q);
0997 if (r)
0998 goto fail_hw;
0999
1000
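        /* Start streaming on the sensor */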
1001 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1002 if (r)
1003 goto fail_csi2_subdev;
1004
1005 cio2->streaming = true;
1006
1007 return 0;
1008
1009 fail_csi2_subdev:
1010 cio2_hw_exit(cio2, q);
1011 fail_hw:
1012 media_pipeline_stop(&q->vdev.entity);
1013 fail_pipeline:
1014 dev_dbg(dev, "failed to start streaming (%d)\n", r);
1015 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1016 pm_runtime_put(dev);
1017
1018 return r;
1019 }
1020
1021 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1022 {
1023 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1024 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1025 struct device *dev = &cio2->pci_dev->dev;
1026
1027 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1028 dev_err(dev, "failed to stop sensor streaming\n");
1029
1030 cio2_hw_exit(cio2, q);
1031 synchronize_irq(cio2->pci_dev->irq);
1032 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1033 media_pipeline_stop(&q->vdev.entity);
1034 pm_runtime_put(dev);
1035 cio2->streaming = false;
1036 }
1037
1038 static const struct vb2_ops cio2_vb2_ops = {
1039 .buf_init = cio2_vb2_buf_init,
1040 .buf_queue = cio2_vb2_buf_queue,
1041 .buf_cleanup = cio2_vb2_buf_cleanup,
1042 .queue_setup = cio2_vb2_queue_setup,
1043 .start_streaming = cio2_vb2_start_streaming,
1044 .stop_streaming = cio2_vb2_stop_streaming,
1045 .wait_prepare = vb2_ops_wait_prepare,
1046 .wait_finish = vb2_ops_wait_finish,
1047 };
1048
/**************** V4L2 interface ****************/
1051 static int cio2_v4l2_querycap(struct file *file, void *fh,
1052 struct v4l2_capability *cap)
1053 {
1054 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1055 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1056
1057 return 0;
1058 }
1059
1060 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1061 struct v4l2_fmtdesc *f)
1062 {
1063 if (f->index >= ARRAY_SIZE(formats))
1064 return -EINVAL;
1065
1066 f->pixelformat = formats[f->index].fourcc;
1067
1068 return 0;
1069 }
1070
1071
1072 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1073 {
1074 struct cio2_queue *q = file_to_cio2_queue(file);
1075
1076 f->fmt.pix_mp = q->format;
1077
1078 return 0;
1079 }
1080
1081 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1082 {
1083 const struct ipu3_cio2_fmt *fmt;
1084 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1085
1086 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1087 if (!fmt)
1088 fmt = &formats[0];
1089
1090
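        /* Clamp to the maximum supported frame size */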
1091 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1092 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1093 if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1094 mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1095
1096 mpix->num_planes = 1;
1097 mpix->pixelformat = fmt->fourcc;
1098 mpix->colorspace = V4L2_COLORSPACE_RAW;
1099 mpix->field = V4L2_FIELD_NONE;
1100 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1101 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1102 mpix->height;
1103
1104
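        /* use default colorimetry */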
1105 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1106 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1107 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1108
1109 return 0;
1110 }
1111
1112 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1113 {
1114 struct cio2_queue *q = file_to_cio2_queue(file);
1115
1116 cio2_v4l2_try_fmt(file, fh, f);
1117 q->format = f->fmt.pix_mp;
1118
1119 return 0;
1120 }
1121
1122 static int
1123 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1124 {
1125 if (input->index > 0)
1126 return -EINVAL;
1127
1128 strscpy(input->name, "camera", sizeof(input->name));
1129 input->type = V4L2_INPUT_TYPE_CAMERA;
1130
1131 return 0;
1132 }
1133
1134 static int
1135 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1136 {
1137 *input = 0;
1138
1139 return 0;
1140 }
1141
1142 static int
1143 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1144 {
1145 return input == 0 ? 0 : -EINVAL;
1146 }
1147
1148 static const struct v4l2_file_operations cio2_v4l2_fops = {
1149 .owner = THIS_MODULE,
1150 .unlocked_ioctl = video_ioctl2,
1151 .open = v4l2_fh_open,
1152 .release = vb2_fop_release,
1153 .poll = vb2_fop_poll,
1154 .mmap = vb2_fop_mmap,
1155 };
1156
1157 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1158 .vidioc_querycap = cio2_v4l2_querycap,
1159 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1160 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1161 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1162 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1163 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1164 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1165 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1166 .vidioc_querybuf = vb2_ioctl_querybuf,
1167 .vidioc_qbuf = vb2_ioctl_qbuf,
1168 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1169 .vidioc_streamon = vb2_ioctl_streamon,
1170 .vidioc_streamoff = vb2_ioctl_streamoff,
1171 .vidioc_expbuf = vb2_ioctl_expbuf,
1172 .vidioc_enum_input = cio2_video_enum_input,
1173 .vidioc_g_input = cio2_video_g_input,
1174 .vidioc_s_input = cio2_video_s_input,
1175 };
1176
1177 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1178 struct v4l2_fh *fh,
1179 struct v4l2_event_subscription *sub)
1180 {
1181 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1182 return -EINVAL;
1183
1184
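        /* Only event id 0 is supported */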
1185 if (sub->id != 0)
1186 return -EINVAL;
1187
1188 return v4l2_event_subscribe(fh, sub, 0, NULL);
1189 }
1190
1191 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1192 {
1193 struct v4l2_mbus_framefmt *format;
1194 const struct v4l2_mbus_framefmt fmt_default = {
1195 .width = 1936,
1196 .height = 1096,
1197 .code = formats[0].mbus_code,
1198 .field = V4L2_FIELD_NONE,
1199 .colorspace = V4L2_COLORSPACE_RAW,
1200 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1201 .quantization = V4L2_QUANTIZATION_DEFAULT,
1202 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1203 };
1204
1205
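        /* Initialize try_fmt on the sink pad */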
1206 format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
1207 *format = fmt_default;
1208
1209
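        /* Source pad: same default as the sink */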
1210 format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
1211 *format = fmt_default;
1212
1213 return 0;
1214 }
1215
/*
 * cio2_subdev_get_fmt - handle get format by pads subdev method
 * @sd: pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
1223 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1224 struct v4l2_subdev_state *sd_state,
1225 struct v4l2_subdev_format *fmt)
1226 {
1227 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1228
1229 mutex_lock(&q->subdev_lock);
1230
1231 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1232 fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
1233 fmt->pad);
1234 else
1235 fmt->format = q->subdev_fmt;
1236
1237 mutex_unlock(&q->subdev_lock);
1238
1239 return 0;
1240 }
1241
/*
 * cio2_subdev_set_fmt - handle set format by pads subdev method
 * @sd: pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
1249 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1250 struct v4l2_subdev_state *sd_state,
1251 struct v4l2_subdev_format *fmt)
1252 {
1253 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1254 struct v4l2_mbus_framefmt *mbus;
1255 u32 mbus_code = fmt->format.code;
1256 unsigned int i;
1257
        /*
         * Only allow setting sink pad format;
         * source always propagates from sink
         */
1262 if (fmt->pad == CIO2_PAD_SOURCE)
1263 return cio2_subdev_get_fmt(sd, sd_state, fmt);
1264
1265 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1266 mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
1267 else
1268 mbus = &q->subdev_fmt;
1269
1270 fmt->format.code = formats[0].mbus_code;
1271
1272 for (i = 0; i < ARRAY_SIZE(formats); i++) {
1273 if (formats[i].mbus_code == mbus_code) {
1274 fmt->format.code = mbus_code;
1275 break;
1276 }
1277 }
1278
1279 fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1280 fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1281 fmt->format.field = V4L2_FIELD_NONE;
1282
1283 mutex_lock(&q->subdev_lock);
1284 *mbus = fmt->format;
1285 mutex_unlock(&q->subdev_lock);
1286
1287 return 0;
1288 }
1289
1290 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1291 struct v4l2_subdev_state *sd_state,
1292 struct v4l2_subdev_mbus_code_enum *code)
1293 {
1294 if (code->index >= ARRAY_SIZE(formats))
1295 return -EINVAL;
1296
1297 code->code = formats[code->index].mbus_code;
1298 return 0;
1299 }
1300
1301 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1302 struct v4l2_subdev_format *fmt)
1303 {
1304 if (is_media_entity_v4l2_subdev(pad->entity)) {
1305 struct v4l2_subdev *sd =
1306 media_entity_to_v4l2_subdev(pad->entity);
1307
1308 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1309 fmt->pad = pad->index;
1310 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1311 }
1312
1313 return -EINVAL;
1314 }
1315
1316 static int cio2_video_link_validate(struct media_link *link)
1317 {
1318 struct media_entity *entity = link->sink->entity;
1319 struct video_device *vd = media_entity_to_video_device(entity);
1320 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1321 struct cio2_device *cio2 = video_get_drvdata(vd);
1322 struct device *dev = &cio2->pci_dev->dev;
1323 struct v4l2_subdev_format source_fmt;
1324 int ret;
1325
1326 if (!media_pad_remote_pad_first(entity->pads)) {
1327 dev_info(dev, "video node %s pad not connected\n", vd->name);
1328 return -ENOTCONN;
1329 }
1330
1331 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1332 if (ret < 0)
1333 return 0;
1334
1335 if (source_fmt.format.width != q->format.width ||
1336 source_fmt.format.height != q->format.height) {
1337 dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
1338 q->format.width, q->format.height,
1339 source_fmt.format.width, source_fmt.format.height);
1340 return -EINVAL;
1341 }
1342
1343 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1344 return -EINVAL;
1345
1346 return 0;
1347 }
1348
1349 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1350 .subscribe_event = cio2_subdev_subscribe_event,
1351 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1352 };
1353
1354 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1355 .open = cio2_subdev_open,
1356 };
1357
1358 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1359 .link_validate = v4l2_subdev_link_validate_default,
1360 .get_fmt = cio2_subdev_get_fmt,
1361 .set_fmt = cio2_subdev_set_fmt,
1362 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1363 };
1364
1365 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1366 .core = &cio2_subdev_core_ops,
1367 .pad = &cio2_subdev_pad_ops,
1368 };
1369
1370
1371
1372 struct sensor_async_subdev {
1373 struct v4l2_async_subdev asd;
1374 struct csi2_bus_info csi2;
1375 };
1376
1377 #define to_sensor_asd(asd) container_of(asd, struct sensor_async_subdev, asd)
1378
/* The .bound() notifier callback when a match is found */
1380 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1381 struct v4l2_subdev *sd,
1382 struct v4l2_async_subdev *asd)
1383 {
1384 struct cio2_device *cio2 = to_cio2_device(notifier);
1385 struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1386 struct cio2_queue *q;
1387
1388 if (cio2->queue[s_asd->csi2.port].sensor)
1389 return -EBUSY;
1390
1391 q = &cio2->queue[s_asd->csi2.port];
1392
1393 q->csi2 = s_asd->csi2;
1394 q->sensor = sd;
1395 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1396
1397 return 0;
1398 }
1399
/* The .unbind() notifier callback */
1401 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1402 struct v4l2_subdev *sd,
1403 struct v4l2_async_subdev *asd)
1404 {
1405 struct cio2_device *cio2 = to_cio2_device(notifier);
1406 struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1407
1408 cio2->queue[s_asd->csi2.port].sensor = NULL;
1409 }
1410
/* .complete() is called after all subdevices have been located */
1412 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1413 {
1414 struct cio2_device *cio2 = to_cio2_device(notifier);
1415 struct device *dev = &cio2->pci_dev->dev;
1416 struct sensor_async_subdev *s_asd;
1417 struct v4l2_async_subdev *asd;
1418 struct cio2_queue *q;
1419 unsigned int pad;
1420 int ret;
1421
1422 list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1423 s_asd = to_sensor_asd(asd);
1424 q = &cio2->queue[s_asd->csi2.port];
1425
1426 for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1427 if (q->sensor->entity.pads[pad].flags &
1428 MEDIA_PAD_FL_SOURCE)
1429 break;
1430
1431 if (pad == q->sensor->entity.num_pads) {
1432 dev_err(dev, "failed to find src pad for %s\n",
1433 q->sensor->name);
1434 return -ENXIO;
1435 }
1436
1437 ret = media_create_pad_link(
1438 &q->sensor->entity, pad,
1439 &q->subdev.entity, CIO2_PAD_SINK,
1440 0);
1441 if (ret) {
1442 dev_err(dev, "failed to create link for %s\n",
1443 q->sensor->name);
1444 return ret;
1445 }
1446 }
1447
1448 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1449 }
1450
1451 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1452 .bound = cio2_notifier_bound,
1453 .unbind = cio2_notifier_unbind,
1454 .complete = cio2_notifier_complete,
1455 };
1456
1457 static int cio2_parse_firmware(struct cio2_device *cio2)
1458 {
1459 struct device *dev = &cio2->pci_dev->dev;
1460 unsigned int i;
1461 int ret;
1462
1463 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1464 struct v4l2_fwnode_endpoint vep = {
1465 .bus_type = V4L2_MBUS_CSI2_DPHY
1466 };
1467 struct sensor_async_subdev *s_asd;
1468 struct fwnode_handle *ep;
1469
1470 ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
1471 FWNODE_GRAPH_ENDPOINT_NEXT);
1472 if (!ep)
1473 continue;
1474
1475 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1476 if (ret)
1477 goto err_parse;
1478
1479 s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1480 struct
1481 sensor_async_subdev);
1482 if (IS_ERR(s_asd)) {
1483 ret = PTR_ERR(s_asd);
1484 goto err_parse;
1485 }
1486
1487 s_asd->csi2.port = vep.base.port;
1488 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1489
1490 fwnode_handle_put(ep);
1491
1492 continue;
1493
1494 err_parse:
1495 fwnode_handle_put(ep);
1496 return ret;
1497 }
1498
        /*
         * Proceed even without sensors connected to allow the device to
         * suspend.
         */
1503 cio2->notifier.ops = &cio2_async_ops;
1504 ret = v4l2_async_nf_register(&cio2->v4l2_dev, &cio2->notifier);
1505 if (ret)
1506 dev_err(dev, "failed to register async notifier : %d\n", ret);
1507
1508 return ret;
1509 }
1510
1511
1512 static const struct media_entity_operations cio2_media_ops = {
1513 .link_validate = v4l2_subdev_link_validate,
1514 };
1515
1516 static const struct media_entity_operations cio2_video_entity_ops = {
1517 .link_validate = cio2_video_link_validate,
1518 };
1519
1520 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1521 {
1522 static const u32 default_width = 1936;
1523 static const u32 default_height = 1096;
1524 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1525 struct device *dev = &cio2->pci_dev->dev;
1526 struct video_device *vdev = &q->vdev;
1527 struct vb2_queue *vbq = &q->vbq;
1528 struct v4l2_subdev *subdev = &q->subdev;
1529 struct v4l2_mbus_framefmt *fmt;
1530 int r;
1531
1532
1533 mutex_init(&q->lock);
1534 mutex_init(&q->subdev_lock);
1535
1536
1537 fmt = &q->subdev_fmt;
1538 fmt->width = default_width;
1539 fmt->height = default_height;
1540 fmt->code = dflt_fmt.mbus_code;
1541 fmt->field = V4L2_FIELD_NONE;
1542
1543 q->format.width = default_width;
1544 q->format.height = default_height;
1545 q->format.pixelformat = dflt_fmt.fourcc;
1546 q->format.colorspace = V4L2_COLORSPACE_RAW;
1547 q->format.field = V4L2_FIELD_NONE;
1548 q->format.num_planes = 1;
1549 q->format.plane_fmt[0].bytesperline =
1550 cio2_bytesperline(q->format.width);
1551 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1552 q->format.height;
1553
1554
1555 r = cio2_fbpt_init(cio2, q);
1556 if (r)
1557 goto fail_fbpt;
1558
1559
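        /* Initialize media entities */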
1560 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1561 MEDIA_PAD_FL_MUST_CONNECT;
1562 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1563 subdev->entity.ops = &cio2_media_ops;
1564 subdev->internal_ops = &cio2_subdev_internal_ops;
1565 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1566 if (r) {
1567 dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
1568 goto fail_subdev_media_entity;
1569 }
1570
1571 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1572 vdev->entity.ops = &cio2_video_entity_ops;
1573 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1574 if (r) {
1575 dev_err(dev, "failed initialize videodev media entity (%d)\n",
1576 r);
1577 goto fail_vdev_media_entity;
1578 }
1579
1580
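        /* Initialize subdev */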
1581 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1582 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1583 subdev->owner = THIS_MODULE;
1584 snprintf(subdev->name, sizeof(subdev->name),
1585 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1586 subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1587 v4l2_set_subdevdata(subdev, cio2);
1588 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1589 if (r) {
1590 dev_err(dev, "failed initialize subdev (%d)\n", r);
1591 goto fail_subdev;
1592 }
1593
1594
1595 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1596 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1597 vbq->ops = &cio2_vb2_ops;
1598 vbq->mem_ops = &vb2_dma_sg_memops;
1599 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1600 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1601 vbq->min_buffers_needed = 1;
1602 vbq->drv_priv = cio2;
1603 vbq->lock = &q->lock;
1604 r = vb2_queue_init(vbq);
1605 if (r) {
1606 dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
1607 goto fail_subdev;
1608 }
1609
1610
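        /* Initialize vdev */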
1611 snprintf(vdev->name, sizeof(vdev->name),
1612 "%s %td", CIO2_NAME, q - cio2->queue);
1613 vdev->release = video_device_release_empty;
1614 vdev->fops = &cio2_v4l2_fops;
1615 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1616 vdev->lock = &cio2->lock;
1617 vdev->v4l2_dev = &cio2->v4l2_dev;
1618 vdev->queue = &q->vbq;
1619 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1620 video_set_drvdata(vdev, cio2);
1621 r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1622 if (r) {
1623 dev_err(dev, "failed to register video device (%d)\n", r);
1624 goto fail_vdev;
1625 }
1626
1627
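        /* Create link from CIO2 subdev to output node */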
1628 r = media_create_pad_link(
1629 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1630 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1631 if (r)
1632 goto fail_link;
1633
1634 return 0;
1635
1636 fail_link:
1637 vb2_video_unregister_device(&q->vdev);
1638 fail_vdev:
1639 v4l2_device_unregister_subdev(subdev);
1640 fail_subdev:
1641 media_entity_cleanup(&vdev->entity);
1642 fail_vdev_media_entity:
1643 media_entity_cleanup(&subdev->entity);
1644 fail_subdev_media_entity:
1645 cio2_fbpt_exit(q, dev);
1646 fail_fbpt:
1647 mutex_destroy(&q->subdev_lock);
1648 mutex_destroy(&q->lock);
1649
1650 return r;
1651 }
1652
1653 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1654 {
1655 vb2_video_unregister_device(&q->vdev);
1656 media_entity_cleanup(&q->vdev.entity);
1657 v4l2_device_unregister_subdev(&q->subdev);
1658 media_entity_cleanup(&q->subdev.entity);
1659 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1660 mutex_destroy(&q->subdev_lock);
1661 mutex_destroy(&q->lock);
1662 }
1663
1664 static int cio2_queues_init(struct cio2_device *cio2)
1665 {
1666 int i, r;
1667
1668 for (i = 0; i < CIO2_QUEUES; i++) {
1669 r = cio2_queue_init(cio2, &cio2->queue[i]);
1670 if (r)
1671 break;
1672 }
1673
1674 if (i == CIO2_QUEUES)
1675 return 0;
1676
1677 for (i--; i >= 0; i--)
1678 cio2_queue_exit(cio2, &cio2->queue[i]);
1679
1680 return r;
1681 }
1682
1683 static void cio2_queues_exit(struct cio2_device *cio2)
1684 {
1685 unsigned int i;
1686
1687 for (i = 0; i < CIO2_QUEUES; i++)
1688 cio2_queue_exit(cio2, &cio2->queue[i]);
1689 }
1690
1691 static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1692 {
1693 struct fwnode_handle *endpoint;
1694
1695 if (IS_ERR_OR_NULL(fwnode))
1696 return -EINVAL;
1697
1698 endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1699 if (endpoint) {
1700 fwnode_handle_put(endpoint);
1701 return 0;
1702 }
1703
1704 return cio2_check_fwnode_graph(fwnode->secondary);
1705 }
1706
1707
1708
1709 static int cio2_pci_probe(struct pci_dev *pci_dev,
1710 const struct pci_device_id *id)
1711 {
1712 struct device *dev = &pci_dev->dev;
1713 struct fwnode_handle *fwnode = dev_fwnode(dev);
1714 struct cio2_device *cio2;
1715 int r;
1716
        /*
         * On some platforms no connections to sensors are defined in
         * firmware. If the fwnode graph has no endpoints, try to build them
         * with the cio2-bridge instead.
         */
1722 r = cio2_check_fwnode_graph(fwnode);
1723 if (r) {
1724 if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
1725 dev_err(dev, "fwnode graph has no endpoints connected\n");
1726 return -EINVAL;
1727 }
1728
1729 r = cio2_bridge_init(pci_dev);
1730 if (r)
1731 return r;
1732 }
1733
1734 cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
1735 if (!cio2)
1736 return -ENOMEM;
1737 cio2->pci_dev = pci_dev;
1738
1739 r = pcim_enable_device(pci_dev);
1740 if (r) {
1741 dev_err(dev, "failed to enable device (%d)\n", r);
1742 return r;
1743 }
1744
1745 dev_info(dev, "device 0x%x (rev: 0x%x)\n",
1746 pci_dev->device, pci_dev->revision);
1747
1748 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1749 if (r) {
1750 dev_err(dev, "failed to remap I/O memory (%d)\n", r);
1751 return -ENODEV;
1752 }
1753
1754 cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1755
1756 pci_set_drvdata(pci_dev, cio2);
1757
1758 pci_set_master(pci_dev);
1759
1760 r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
1761 if (r) {
1762 dev_err(dev, "failed to set DMA mask (%d)\n", r);
1763 return -ENODEV;
1764 }
1765
1766 r = pci_enable_msi(pci_dev);
1767 if (r) {
1768 dev_err(dev, "failed to enable MSI (%d)\n", r);
1769 return r;
1770 }
1771
1772 r = cio2_fbpt_init_dummy(cio2);
1773 if (r)
1774 return r;
1775
1776 mutex_init(&cio2->lock);
1777
1778 cio2->media_dev.dev = dev;
1779 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1780 sizeof(cio2->media_dev.model));
1781 cio2->media_dev.hw_revision = 0;
1782
1783 media_device_init(&cio2->media_dev);
1784 r = media_device_register(&cio2->media_dev);
1785 if (r < 0)
1786 goto fail_mutex_destroy;
1787
1788 cio2->v4l2_dev.mdev = &cio2->media_dev;
1789 r = v4l2_device_register(dev, &cio2->v4l2_dev);
1790 if (r) {
1791 dev_err(dev, "failed to register V4L2 device (%d)\n", r);
1792 goto fail_media_device_unregister;
1793 }
1794
1795 r = cio2_queues_init(cio2);
1796 if (r)
1797 goto fail_v4l2_device_unregister;
1798
1799 v4l2_async_nf_init(&cio2->notifier);
1800
        /* Register the async notifier for the sensor subdevices we need */
1802 r = cio2_parse_firmware(cio2);
1803 if (r)
1804 goto fail_clean_notifier;
1805
1806 r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
1807 CIO2_NAME, cio2);
1808 if (r) {
1809 dev_err(dev, "failed to request IRQ (%d)\n", r);
1810 goto fail_clean_notifier;
1811 }
1812
1813 pm_runtime_put_noidle(dev);
1814 pm_runtime_allow(dev);
1815
1816 return 0;
1817
1818 fail_clean_notifier:
1819 v4l2_async_nf_unregister(&cio2->notifier);
1820 v4l2_async_nf_cleanup(&cio2->notifier);
1821 cio2_queues_exit(cio2);
1822 fail_v4l2_device_unregister:
1823 v4l2_device_unregister(&cio2->v4l2_dev);
1824 fail_media_device_unregister:
1825 media_device_unregister(&cio2->media_dev);
1826 media_device_cleanup(&cio2->media_dev);
1827 fail_mutex_destroy:
1828 mutex_destroy(&cio2->lock);
1829 cio2_fbpt_exit_dummy(cio2);
1830
1831 return r;
1832 }
1833
1834 static void cio2_pci_remove(struct pci_dev *pci_dev)
1835 {
1836 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1837
1838 media_device_unregister(&cio2->media_dev);
1839 v4l2_async_nf_unregister(&cio2->notifier);
1840 v4l2_async_nf_cleanup(&cio2->notifier);
1841 cio2_queues_exit(cio2);
1842 cio2_fbpt_exit_dummy(cio2);
1843 v4l2_device_unregister(&cio2->v4l2_dev);
1844 media_device_cleanup(&cio2->media_dev);
1845 mutex_destroy(&cio2->lock);
1846 }
1847
1848 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1849 {
1850 struct pci_dev *pci_dev = to_pci_dev(dev);
1851 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1852 void __iomem *const base = cio2->base;
1853 u16 pm;
1854
1855 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1856 dev_dbg(dev, "cio2 runtime suspend.\n");
1857
1858 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1859 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1860 pm |= CIO2_PMCSR_D3;
1861 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1862
1863 return 0;
1864 }
1865
1866 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1867 {
1868 struct pci_dev *pci_dev = to_pci_dev(dev);
1869 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1870 void __iomem *const base = cio2->base;
1871 u16 pm;
1872
1873 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1874 dev_dbg(dev, "cio2 runtime resume.\n");
1875
1876 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1877 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1878 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1879
1880 return 0;
1881 }
1882
/*
 * Helper function to advance all the elements of a circular buffer by
 * "start" positions
 */
1887 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1888 {
1889 struct {
1890 size_t begin, end;
1891 } arr[2] = {
1892 { 0, start - 1 },
1893 { start, elems - 1 },
1894 };
1895
1896 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1897
        /* Loop as long as we have out-of-place entries */
1899 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1900 size_t size0, i;
1901
                /*
                 * Find the number of entries that can be arranged on this
                 * iteration.
                 */
1906 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1907
                /* Swap the entries between the two chunks */
1909 for (i = 0; i < size0; i++) {
1910 u8 *d = ptr + elem_size * (arr[1].begin + i);
1911 u8 *s = ptr + elem_size * (arr[0].begin + i);
1912 size_t j;
1913
1914 for (j = 0; j < elem_size; j++)
1915 swap(d[j], s[j]);
1916 }
1917
1918 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
                        /* The end of the first chunk is not arranged yet */
1920 arr[0].begin += size0;
1921 } else {
                        /*
                         * The first chunk is fully arranged; continue with
                         * the remaining part of the second chunk.
                         */
1926 arr[0].begin = arr[1].begin;
1927 arr[0].end = arr[1].begin + size0 - 1;
1928 arr[1].begin += size0;
1929 }
1930 }
1931 }
1932
1933 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1934 {
1935 unsigned int i, j;
1936
1937 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1938 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1939 if (q->bufs[j])
1940 break;
1941
1942 if (i == CIO2_MAX_BUFFERS)
1943 return;
1944
1945 if (j) {
1946 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1947 CIO2_MAX_BUFFERS, j);
1948 arrange(q->bufs, sizeof(struct cio2_buffer *),
1949 CIO2_MAX_BUFFERS, j);
1950 }
1951
        /*
         * DMA clears the valid bit when accessing the entry.
         * When stopping stream in suspend callback, some of the buffers
         * may be in an invalid state. After resume, when DMA meets the
         * invalid buffer, it will halt and stop receiving new data.
         * To avoid DMA halting, set the valid bit for all buffers in FBPT.
         */
1959 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1960 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1961 }
1962
1963 static int __maybe_unused cio2_suspend(struct device *dev)
1964 {
1965 struct pci_dev *pci_dev = to_pci_dev(dev);
1966 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1967 struct cio2_queue *q = cio2->cur_queue;
1968 int r;
1969
1970 dev_dbg(dev, "cio2 suspend\n");
1971 if (!cio2->streaming)
1972 return 0;
1973
        /* Stop stream */
1975 r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
1976 if (r) {
1977 dev_err(dev, "failed to stop sensor streaming\n");
1978 return r;
1979 }
1980
1981 cio2_hw_exit(cio2, q);
1982 synchronize_irq(pci_dev->irq);
1983
1984 pm_runtime_force_suspend(dev);
1985
        /*
         * Upon resume, the DMA engine processes the FBPT from entry 0 again,
         * so rotate the FBPT and the buffer array so that the first pending
         * buffer sits at index 0.
         */
1990 cio2_fbpt_rearrange(cio2, q);
1991 q->bufs_first = 0;
1992 q->bufs_next = 0;
1993
1994 return 0;
1995 }
1996
1997 static int __maybe_unused cio2_resume(struct device *dev)
1998 {
1999 struct cio2_device *cio2 = dev_get_drvdata(dev);
2000 struct cio2_queue *q = cio2->cur_queue;
2001 int r;
2002
2003 dev_dbg(dev, "cio2 resume\n");
2004 if (!cio2->streaming)
2005 return 0;
2006
2007 r = pm_runtime_force_resume(dev);
2008 if (r < 0) {
2009 dev_err(dev, "failed to set power %d\n", r);
2010 return r;
2011 }
2012
2013 r = cio2_hw_init(cio2, q);
2014 if (r) {
2015 dev_err(dev, "fail to init cio2 hw\n");
2016 return r;
2017 }
2018
2019 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
2020 if (r) {
2021 dev_err(dev, "fail to start sensor streaming\n");
2022 cio2_hw_exit(cio2, q);
2023 }
2024
2025 return r;
2026 }
2027
2028 static const struct dev_pm_ops cio2_pm_ops = {
2029 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2030 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2031 };
2032
2033 static const struct pci_device_id cio2_pci_id_table[] = {
2034 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2035 { }
2036 };
2037
2038 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2039
2040 static struct pci_driver cio2_pci_driver = {
2041 .name = CIO2_NAME,
2042 .id_table = cio2_pci_id_table,
2043 .probe = cio2_pci_probe,
2044 .remove = cio2_pci_remove,
2045 .driver = {
2046 .pm = &cio2_pm_ops,
2047 },
2048 };
2049
2050 module_pci_driver(cio2_pci_driver);
2051
2052 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2053 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2054 MODULE_AUTHOR("Jian Xu Zheng");
2055 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2056 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2057 MODULE_LICENSE("GPL v2");
2058 MODULE_DESCRIPTION("IPU3 CIO2 driver");