0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2008
0004  * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
0005  *
0006  * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
0007  */
0008 
0009 #include <linux/dma-mapping.h>
0010 #include <linux/init.h>
0011 #include <linux/platform_device.h>
0012 #include <linux/err.h>
0013 #include <linux/spinlock.h>
0014 #include <linux/delay.h>
0015 #include <linux/list.h>
0016 #include <linux/clk.h>
0017 #include <linux/vmalloc.h>
0018 #include <linux/string.h>
0019 #include <linux/interrupt.h>
0020 #include <linux/io.h>
0021 #include <linux/module.h>
0022 #include <linux/dma/ipu-dma.h>
0023 
0024 #include "../dmaengine.h"
0025 #include "ipu_intern.h"
0026 
0027 #define FS_VF_IN_VALID  0x00000002
0028 #define FS_ENC_IN_VALID 0x00000001
0029 
0030 static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
0031                    bool wait_for_stop);
0032 
0033 /*
0034  * There can be only one IPU instance. We could allocate it dynamically, but then
0035  * we'd have to add an extra parameter to some functions, and use something as ugly as
0036  *  struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
0037  * in the ISR.
0038  */
0039 static struct ipu ipu_data;
0040 
0041 #define to_ipu(id) container_of(id, struct ipu, idmac)
0042 
0043 static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
0044 {
0045     return __raw_readl(ipu->reg_ic + reg);
0046 }
0047 
0048 #define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF)
0049 
0050 static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
0051 {
0052     __raw_writel(value, ipu->reg_ic + reg);
0053 }
0054 
0055 #define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF)
0056 
0057 static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
0058 {
0059     return __raw_readl(ipu->reg_ipu + reg);
0060 }
0061 
0062 static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
0063 {
0064     __raw_writel(value, ipu->reg_ipu + reg);
0065 }
0066 
0067 /*****************************************************************************
0068  * IPU / IC common functions
0069  */
0070 static void dump_idmac_reg(struct ipu *ipu)
0071 {
0072     dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
0073         "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
0074         idmac_read_icreg(ipu, IDMAC_CONF),
0075         idmac_read_icreg(ipu, IC_CONF),
0076         idmac_read_icreg(ipu, IDMAC_CHA_EN),
0077         idmac_read_icreg(ipu, IDMAC_CHA_PRI),
0078         idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
0079     dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
0080         "DB_MODE 0x%x, TASKS_STAT 0x%x\n",
0081         idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
0082         idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
0083         idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
0084         idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
0085         idmac_read_ipureg(ipu, IPU_TASKS_STAT));
0086 }
0087 
0088 static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
0089 {
0090     switch (fmt) {
0091     case IPU_PIX_FMT_GENERIC:   /* generic data */
0092     case IPU_PIX_FMT_RGB332:
0093     case IPU_PIX_FMT_YUV420P:
0094     case IPU_PIX_FMT_YUV422P:
0095     default:
0096         return 1;
0097     case IPU_PIX_FMT_RGB565:
0098     case IPU_PIX_FMT_YUYV:
0099     case IPU_PIX_FMT_UYVY:
0100         return 2;
0101     case IPU_PIX_FMT_BGR24:
0102     case IPU_PIX_FMT_RGB24:
0103         return 3;
0104     case IPU_PIX_FMT_GENERIC_32:    /* generic data */
0105     case IPU_PIX_FMT_BGR32:
0106     case IPU_PIX_FMT_RGB32:
0107     case IPU_PIX_FMT_ABGR32:
0108         return 4;
0109     }
0110 }
0111 
0112 /* Enable direct write to memory by the Camera Sensor Interface */
0113 static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
0114 {
0115     uint32_t ic_conf, mask;
0116 
0117     switch (channel) {
0118     case IDMAC_IC_0:
0119         mask = IC_CONF_PRPENC_EN;
0120         break;
0121     case IDMAC_IC_7:
0122         mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
0123         break;
0124     default:
0125         return;
0126     }
0127     ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
0128     idmac_write_icreg(ipu, ic_conf, IC_CONF);
0129 }
0130 
0131 /* Called under spin_lock_irqsave(&ipu_data.lock) */
0132 static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
0133 {
0134     uint32_t ic_conf, mask;
0135 
0136     switch (channel) {
0137     case IDMAC_IC_0:
0138         mask = IC_CONF_PRPENC_EN;
0139         break;
0140     case IDMAC_IC_7:
0141         mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
0142         break;
0143     default:
0144         return;
0145     }
0146     ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
0147     idmac_write_icreg(ipu, ic_conf, IC_CONF);
0148 }
0149 
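     /*
      * Read the task status for @channel from IPU_TASKS_STAT. Only IDMAC_IC_7
      * (CSI -> memory) has a dedicated status field here; all other channels
      * report TASK_STAT_IDLE.
      */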
0150 static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
0151 {
0152     uint32_t stat = TASK_STAT_IDLE;
0153     uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
0154 
0155     switch (channel) {
0156     case IDMAC_IC_7:
0157         stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
0158             TSTAT_CSI2MEM_OFFSET;
0159         break;
0160     case IDMAC_IC_0:
0161     case IDMAC_SDC_0:
0162     case IDMAC_SDC_1:
0163     default:
0164         break;
0165     }
0166     return stat;
0167 }
0168 
0169 struct chan_param_mem_planar {
0170     /* Word 0 */
0171     u32 xv:10;
0172     u32 yv:10;
0173     u32 xb:12;
0174 
0175     u32 yb:12;
0176     u32 res1:2;
0177     u32 nsb:1;
0178     u32 lnpb:6;
0179     u32 ubo_l:11;
0180 
0181     u32 ubo_h:15;
0182     u32 vbo_l:17;
0183 
0184     u32 vbo_h:9;
0185     u32 res2:3;
0186     u32 fw:12;
0187     u32 fh_l:8;
0188 
0189     u32 fh_h:4;
0190     u32 res3:28;
0191 
0192     /* Word 1 */
0193     u32 eba0;
0194 
0195     u32 eba1;
0196 
0197     u32 bpp:3;
0198     u32 sl:14;
0199     u32 pfs:3;
0200     u32 bam:3;
0201     u32 res4:2;
0202     u32 npb:6;
0203     u32 res5:1;
0204 
0205     u32 sat:2;
0206     u32 res6:30;
0207 } __attribute__ ((packed));
0208 
0209 struct chan_param_mem_interleaved {
0210     /* Word 0 */
0211     u32 xv:10;
0212     u32 yv:10;
0213     u32 xb:12;
0214 
0215     u32 yb:12;
0216     u32 sce:1;
0217     u32 res1:1;
0218     u32 nsb:1;
0219     u32 lnpb:6;
0220     u32 sx:10;
0221     u32 sy_l:1;
0222 
0223     u32 sy_h:9;
0224     u32 ns:10;
0225     u32 sm:10;
0226     u32 sdx_l:3;
0227 
0228     u32 sdx_h:2;
0229     u32 sdy:5;
0230     u32 sdrx:1;
0231     u32 sdry:1;
0232     u32 sdr1:1;
0233     u32 res2:2;
0234     u32 fw:12;
0235     u32 fh_l:8;
0236 
0237     u32 fh_h:4;
0238     u32 res3:28;
0239 
0240     /* Word 1 */
0241     u32 eba0;
0242 
0243     u32 eba1;
0244 
0245     u32 bpp:3;
0246     u32 sl:14;
0247     u32 pfs:3;
0248     u32 bam:3;
0249     u32 res4:2;
0250     u32 npb:6;
0251     u32 res5:1;
0252 
0253     u32 sat:2;
0254     u32 scc:1;
0255     u32 ofs0:5;
0256     u32 ofs1:5;
0257     u32 ofs2:5;
0258     u32 ofs3:5;
0259     u32 wid0:3;
0260     u32 wid1:3;
0261     u32 wid2:3;
0262 
0263     u32 wid3:3;
0264     u32 dec_sel:1;
0265     u32 res6:28;
0266 } __attribute__ ((packed));
0267 
0268 union chan_param_mem {
0269     struct chan_param_mem_planar        pp;
0270     struct chan_param_mem_interleaved   ip;
0271 };
0272 
0273 static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
0274                       u32 u_offset, u32 v_offset)
0275 {
0276     params->pp.ubo_l = u_offset & 0x7ff;
0277     params->pp.ubo_h = u_offset >> 11;
0278     params->pp.vbo_l = v_offset & 0x1ffff;
0279     params->pp.vbo_h = v_offset >> 17;
0280 }
0281 
0282 static void ipu_ch_param_set_size(union chan_param_mem *params,
0283                   uint32_t pixel_fmt, uint16_t width,
0284                   uint16_t height, uint16_t stride)
0285 {
0286     u32 u_offset;
0287     u32 v_offset;
0288 
0289     params->pp.fw       = width - 1;
0290     params->pp.fh_l     = height - 1;
0291     params->pp.fh_h     = (height - 1) >> 8;
0292     params->pp.sl       = stride - 1;
0293 
0294     switch (pixel_fmt) {
0295     case IPU_PIX_FMT_GENERIC:
0296         /* Represents 8-bit Generic data */
0297         params->pp.bpp  = 3;
0298         params->pp.pfs  = 7;
0299         params->pp.npb  = 31;
0300         params->pp.sat  = 2;        /* SAT = use 32-bit access */
0301         break;
0302     case IPU_PIX_FMT_GENERIC_32:
0303         /* Represents 32-bit Generic data */
0304         params->pp.bpp  = 0;
0305         params->pp.pfs  = 7;
0306         params->pp.npb  = 7;
0307         params->pp.sat  = 2;        /* SAT = use 32-bit access */
0308         break;
0309     case IPU_PIX_FMT_RGB565:
0310         params->ip.bpp  = 2;
0311         params->ip.pfs  = 4;
0312         params->ip.npb  = 15;
0313         params->ip.sat  = 2;        /* SAT = 32-bit access */
0314         params->ip.ofs0 = 0;        /* Red bit offset */
0315         params->ip.ofs1 = 5;        /* Green bit offset */
0316         params->ip.ofs2 = 11;       /* Blue bit offset */
0317         params->ip.ofs3 = 16;       /* Alpha bit offset */
0318         params->ip.wid0 = 4;        /* Red bit width - 1 */
0319         params->ip.wid1 = 5;        /* Green bit width - 1 */
0320         params->ip.wid2 = 4;        /* Blue bit width - 1 */
0321         break;
0322     case IPU_PIX_FMT_BGR24:
0323         params->ip.bpp  = 1;        /* 24 BPP & RGB PFS */
0324         params->ip.pfs  = 4;
0325         params->ip.npb  = 7;
0326         params->ip.sat  = 2;        /* SAT = 32-bit access */
0327         params->ip.ofs0 = 0;        /* Red bit offset */
0328         params->ip.ofs1 = 8;        /* Green bit offset */
0329         params->ip.ofs2 = 16;       /* Blue bit offset */
0330         params->ip.ofs3 = 24;       /* Alpha bit offset */
0331         params->ip.wid0 = 7;        /* Red bit width - 1 */
0332         params->ip.wid1 = 7;        /* Green bit width - 1 */
0333         params->ip.wid2 = 7;        /* Blue bit width - 1 */
0334         break;
0335     case IPU_PIX_FMT_RGB24:
0336         params->ip.bpp  = 1;        /* 24 BPP & RGB PFS */
0337         params->ip.pfs  = 4;
0338         params->ip.npb  = 7;
0339         params->ip.sat  = 2;        /* SAT = 32-bit access */
0340         params->ip.ofs0 = 16;       /* Red bit offset */
0341         params->ip.ofs1 = 8;        /* Green bit offset */
0342         params->ip.ofs2 = 0;        /* Blue bit offset */
0343         params->ip.ofs3 = 24;       /* Alpha bit offset */
0344         params->ip.wid0 = 7;        /* Red bit width - 1 */
0345         params->ip.wid1 = 7;        /* Green bit width - 1 */
0346         params->ip.wid2 = 7;        /* Blue bit width - 1 */
0347         break;
0348     case IPU_PIX_FMT_BGRA32:
0349     case IPU_PIX_FMT_BGR32:
0350     case IPU_PIX_FMT_ABGR32:
0351         params->ip.bpp  = 0;
0352         params->ip.pfs  = 4;
0353         params->ip.npb  = 7;
0354         params->ip.sat  = 2;        /* SAT = 32-bit access */
0355         params->ip.ofs0 = 8;        /* Red bit offset */
0356         params->ip.ofs1 = 16;       /* Green bit offset */
0357         params->ip.ofs2 = 24;       /* Blue bit offset */
0358         params->ip.ofs3 = 0;        /* Alpha bit offset */
0359         params->ip.wid0 = 7;        /* Red bit width - 1 */
0360         params->ip.wid1 = 7;        /* Green bit width - 1 */
0361         params->ip.wid2 = 7;        /* Blue bit width - 1 */
0362         params->ip.wid3 = 7;        /* Alpha bit width - 1 */
0363         break;
0364     case IPU_PIX_FMT_RGBA32:
0365     case IPU_PIX_FMT_RGB32:
0366         params->ip.bpp  = 0;
0367         params->ip.pfs  = 4;
0368         params->ip.npb  = 7;
0369         params->ip.sat  = 2;        /* SAT = 32-bit access */
0370         params->ip.ofs0 = 24;       /* Red bit offset */
0371         params->ip.ofs1 = 16;       /* Green bit offset */
0372         params->ip.ofs2 = 8;        /* Blue bit offset */
0373         params->ip.ofs3 = 0;        /* Alpha bit offset */
0374         params->ip.wid0 = 7;        /* Red bit width - 1 */
0375         params->ip.wid1 = 7;        /* Green bit width - 1 */
0376         params->ip.wid2 = 7;        /* Blue bit width - 1 */
0377         params->ip.wid3 = 7;        /* Alpha bit width - 1 */
0378         break;
0379     case IPU_PIX_FMT_UYVY:
0380         params->ip.bpp  = 2;
0381         params->ip.pfs  = 6;
0382         params->ip.npb  = 7;
0383         params->ip.sat  = 2;        /* SAT = 32-bit access */
0384         break;
0385     case IPU_PIX_FMT_YUV420P2:
0386     case IPU_PIX_FMT_YUV420P:
0387         params->ip.bpp  = 3;
0388         params->ip.pfs  = 3;
0389         params->ip.npb  = 7;
0390         params->ip.sat  = 2;        /* SAT = 32-bit access */
0391         u_offset = stride * height;
0392         v_offset = u_offset + u_offset / 4;
0393         ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
0394         break;
0395     case IPU_PIX_FMT_YVU422P:
0396         params->ip.bpp  = 3;
0397         params->ip.pfs  = 2;
0398         params->ip.npb  = 7;
0399         params->ip.sat  = 2;        /* SAT = 32-bit access */
0400         v_offset = stride * height;
0401         u_offset = v_offset + v_offset / 2;
0402         ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
0403         break;
0404     case IPU_PIX_FMT_YUV422P:
0405         params->ip.bpp  = 3;
0406         params->ip.pfs  = 2;
0407         params->ip.npb  = 7;
0408         params->ip.sat  = 2;        /* SAT = 32-bit access */
0409         u_offset = stride * height;
0410         v_offset = u_offset + u_offset / 2;
0411         ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
0412         break;
0413     default:
0414         dev_err(ipu_data.dev,
0415             "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
0416         break;
0417     }
0418 
0419     params->pp.nsb = 1;
0420 }
0421 
0422 static void ipu_ch_param_set_buffer(union chan_param_mem *params,
0423                     dma_addr_t buf0, dma_addr_t buf1)
0424 {
0425     params->pp.eba0 = buf0;
0426     params->pp.eba1 = buf1;
0427 }
0428 
0429 static void ipu_ch_param_set_rotation(union chan_param_mem *params,
0430                       enum ipu_rotate_mode rotate)
0431 {
0432     params->pp.bam = rotate;
0433 }
0434 
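     /*
      * Write @num_words 32-bit words of channel parameter data through the
      * IPU_IMA_ADDR / IPU_IMA_DATA register pair. After word 4 of a parameter
      * row the address is advanced to word 0 of the next row.
      */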
0435 static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
0436                 uint32_t num_words)
0437 {
0438     for (; num_words > 0; num_words--) {
0439         dev_dbg(ipu_data.dev,
0440             "write param mem - addr = 0x%08X, data = 0x%08X\n",
0441             addr, *data);
0442         idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
0443         idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
0444         addr++;
0445         if ((addr & 0x7) == 5) {
0446             addr &= ~0x7;   /* set to word 0 */
0447             addr += 8;  /* increment to next row */
0448         }
0449     }
0450 }
0451 
0452 static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
0453                   uint32_t *resize_coeff,
0454                   uint32_t *downsize_coeff)
0455 {
0456     uint32_t temp_size;
0457     uint32_t temp_downsize;
0458 
0459     *resize_coeff   = 1 << 13;
0460     *downsize_coeff = 1 << 13;
0461 
0462     /* Cannot downsize more than 8:1 */
0463     if (out_size << 3 < in_size)
0464         return -EINVAL;
0465 
0466     /* compute downsizing coefficient */
0467     temp_downsize = 0;
0468     temp_size = in_size;
0469     while (temp_size >= out_size * 2 && temp_downsize < 2) {
0470         temp_size >>= 1;
0471         temp_downsize++;
0472     }
0473     *downsize_coeff = temp_downsize;
0474 
0475     /*
0476      * Compute the resizing coefficient using the following formula:
0477      * resize_coeff = M * (SI - 1) / (SO - 1)
0478      * where M = 2^13, SI = input size, SO = output size
0479      */
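         /*
          * Worked example (illustrative): scaling 640 -> 480 pixels needs no
          * downsizing (downsize_coeff = 0) and gives
          * resize_coeff = 8192 * 639 / 479 = 10928, i.e. ~1.3339 in the
          * 1.13 fixed-point format used by the IC resizer.
          */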
0480     *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
0481     if (*resize_coeff >= 16384L) {
0482         dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
0483         *resize_coeff = 0x3FFF;
0484     }
0485 
0486     dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
0487         "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
0488         *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
0489         ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
0490 
0491     return 0;
0492 }
0493 
0494 static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
0495 {
0496     switch (fmt) {
0497     case IPU_PIX_FMT_RGB565:
0498     case IPU_PIX_FMT_BGR24:
0499     case IPU_PIX_FMT_RGB24:
0500     case IPU_PIX_FMT_BGR32:
0501     case IPU_PIX_FMT_RGB32:
0502         return IPU_COLORSPACE_RGB;
0503     default:
0504         return IPU_COLORSPACE_YCBCR;
0505     }
0506 }
0507 
0508 static int ipu_ic_init_prpenc(struct ipu *ipu,
0509                   union ipu_channel_param *params, bool src_is_csi)
0510 {
0511     uint32_t reg, ic_conf;
0512     uint32_t downsize_coeff, resize_coeff;
0513     enum ipu_color_space in_fmt, out_fmt;
0514 
0515     /* Setup vertical resizing */
0516     calc_resize_coeffs(params->video.in_height,
0517                 params->video.out_height,
0518                 &resize_coeff, &downsize_coeff);
0519     reg = (downsize_coeff << 30) | (resize_coeff << 16);
0520 
0521     /* Setup horizontal resizing */
0522     calc_resize_coeffs(params->video.in_width,
0523                 params->video.out_width,
0524                 &resize_coeff, &downsize_coeff);
0525     reg |= (downsize_coeff << 14) | resize_coeff;
0526 
0527     /* Setup color space conversion */
0528     in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
0529     out_fmt = format_to_colorspace(params->video.out_pixel_fmt);
0530 
0531     /*
0532      * Colourspace conversion unsupported yet - see _init_csc() in
0533      * Freescale sources
0534      */
0535     if (in_fmt != out_fmt) {
0536         dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
0537         return -EOPNOTSUPP;
0538     }
0539 
0540     idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);
0541 
0542     ic_conf = idmac_read_icreg(ipu, IC_CONF);
0543 
0544     if (src_is_csi)
0545         ic_conf &= ~IC_CONF_RWS_EN;
0546     else
0547         ic_conf |= IC_CONF_RWS_EN;
0548 
0549     idmac_write_icreg(ipu, ic_conf, IC_CONF);
0550 
0551     return 0;
0552 }
0553 
0554 static uint32_t dma_param_addr(uint32_t dma_ch)
0555 {
0556     /* Channel Parameter Memory */
0557     return 0x10000 | (dma_ch << 4);
0558 }
0559 
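     /* Set or clear the high-priority bit for @channel in IDMAC_CHA_PRI */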
0560 static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
0561                      bool prio)
0562 {
0563     u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);
0564 
0565     if (prio)
0566         reg |= 1UL << channel;
0567     else
0568         reg &= ~(1UL << channel);
0569 
0570     idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);
0571 
0572     dump_idmac_reg(ipu);
0573 }
0574 
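     /* IPU_CONF sub-module enable bits required by @channel */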
0575 static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
0576 {
0577     uint32_t mask;
0578 
0579     switch (channel) {
0580     case IDMAC_IC_0:
0581     case IDMAC_IC_7:
0582         mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
0583         break;
0584     case IDMAC_SDC_0:
0585     case IDMAC_SDC_1:
0586         mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
0587         break;
0588     default:
0589         mask = 0;
0590         break;
0591     }
0592 
0593     return mask;
0594 }
0595 
0596 /**
0597  * ipu_enable_channel() - enable an IPU channel.
0598  * @idmac:  IPU DMAC context.
0599  * @ichan:  IDMAC channel.
0600  * @return: 0 on success or negative error code on failure.
0601  */
0602 static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
0603 {
0604     struct ipu *ipu = to_ipu(idmac);
0605     enum ipu_channel channel = ichan->dma_chan.chan_id;
0606     uint32_t reg;
0607     unsigned long flags;
0608 
0609     spin_lock_irqsave(&ipu->lock, flags);
0610 
0611     /* Reset to buffer 0 */
0612     idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
0613     ichan->active_buffer = 0;
0614     ichan->status = IPU_CHANNEL_ENABLED;
0615 
0616     switch (channel) {
0617     case IDMAC_SDC_0:
0618     case IDMAC_SDC_1:
0619     case IDMAC_IC_7:
0620         ipu_channel_set_priority(ipu, channel, true);
0621         break;
0622     default:
0623         break;
0624     }
0625 
0626     reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
0627 
0628     idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);
0629 
0630     ipu_ic_enable_task(ipu, channel);
0631 
0632     spin_unlock_irqrestore(&ipu->lock, flags);
0633     return 0;
0634 }
0635 
0636 /**
0637  * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
0638  * @ichan:  IDMAC channel.
0639  * @pixel_fmt:  pixel format of buffer. Pixel format is a FOURCC ASCII code.
0640  * @width:  width of buffer in pixels.
0641  * @height: height of buffer in pixels.
0642  * @stride: stride length of buffer in pixels.
0643  * @rot_mode:   rotation mode of buffer. A rotation setting other than
0644  *      IPU_ROTATE_VERT_FLIP should only be used for input buffers of
0645  *      rotation channels.
0646  * @phyaddr_0:  buffer 0 physical address.
0647  * @phyaddr_1:  buffer 1 physical address. Setting this to a value other than
0648  *      NULL enables double buffering mode.
0649  * @return: 0 on success or negative error code on failure.
0650  */
0651 static int ipu_init_channel_buffer(struct idmac_channel *ichan,
0652                    enum pixel_fmt pixel_fmt,
0653                    uint16_t width, uint16_t height,
0654                    uint32_t stride,
0655                    enum ipu_rotate_mode rot_mode,
0656                    dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
0657 {
0658     enum ipu_channel channel = ichan->dma_chan.chan_id;
0659     struct idmac *idmac = to_idmac(ichan->dma_chan.device);
0660     struct ipu *ipu = to_ipu(idmac);
0661     union chan_param_mem params = {};
0662     unsigned long flags;
0663     uint32_t reg;
0664     uint32_t stride_bytes;
0665 
0666     stride_bytes = stride * bytes_per_pixel(pixel_fmt);
0667 
0668     if (stride_bytes % 4) {
0669         dev_err(ipu->dev,
0670             "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
0671             stride, stride_bytes);
0672         return -EINVAL;
0673     }
0674 
0675     /* IC channel's stride must be a multiple of 8 pixels */
0676     if ((channel <= IDMAC_IC_13) && (stride % 8)) {
0677         dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
0678         return -EINVAL;
0679     }
0680 
0681     /* Build parameter memory data for DMA channel */
0682     ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
0683     ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
0684     ipu_ch_param_set_rotation(&params, rot_mode);
0685 
0686     spin_lock_irqsave(&ipu->lock, flags);
0687 
0688     ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
0689 
0690     reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
0691 
0692     if (phyaddr_1)
0693         reg |= 1UL << channel;
0694     else
0695         reg &= ~(1UL << channel);
0696 
0697     idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);
0698 
0699     ichan->status = IPU_CHANNEL_READY;
0700 
0701     spin_unlock_irqrestore(&ipu->lock, flags);
0702 
0703     return 0;
0704 }
0705 
0706 /**
0707  * ipu_select_buffer() - mark a channel's buffer as ready.
0708  * @channel:    channel ID.
0709  * @buffer_n:   buffer number to mark ready.
0710  */
0711 static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
0712 {
0713     /* No locking - this is a write-one-to-set register, cleared by IPU */
0714     if (buffer_n == 0)
0715         /* Mark buffer 0 as ready. */
0716         idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
0717     else
0718         /* Mark buffer 1 as ready. */
0719         idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
0720 }
0721 
0722 /**
0723  * ipu_update_channel_buffer() - update physical address of a channel buffer.
0724  * @ichan:  IDMAC channel.
0725  * @buffer_n:   buffer number to update.
0726  *      0 or 1 are the only valid values.
0727  * @phyaddr:    buffer physical address.
0728  */
0729 /* Called under spin_lock(_irqsave)(&ichan->lock) */
0730 static void ipu_update_channel_buffer(struct idmac_channel *ichan,
0731                       int buffer_n, dma_addr_t phyaddr)
0732 {
0733     enum ipu_channel channel = ichan->dma_chan.chan_id;
0734     uint32_t reg;
0735     unsigned long flags;
0736 
0737     spin_lock_irqsave(&ipu_data.lock, flags);
0738 
0739     if (buffer_n == 0) {
0740         reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
0741         if (reg & (1UL << channel)) {
0742             ipu_ic_disable_task(&ipu_data, channel);
0743             ichan->status = IPU_CHANNEL_READY;
0744         }
0745 
0746         /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
0747         idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
0748                    0x0008UL, IPU_IMA_ADDR);
0749         idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
0750     } else {
0751         reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
0752         if (reg & (1UL << channel)) {
0753             ipu_ic_disable_task(&ipu_data, channel);
0754             ichan->status = IPU_CHANNEL_READY;
0755         }
0756 
0757         /* Check if double-buffering is already enabled */
0758         reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);
0759 
0760         if (!(reg & (1UL << channel)))
0761             idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
0762                        IPU_CHA_DB_MODE_SEL);
0763 
0764         /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
0765         idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
0766                    0x0009UL, IPU_IMA_ADDR);
0767         idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
0768     }
0769 
0770     spin_unlock_irqrestore(&ipu_data.lock, flags);
0771 }
0772 
0773 /* Called under spin_lock_irqsave(&ichan->lock) */
0774 static int ipu_submit_buffer(struct idmac_channel *ichan,
0775     struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
0776 {
0777     unsigned int chan_id = ichan->dma_chan.chan_id;
0778     struct device *dev = &ichan->dma_chan.dev->device;
0779 
0780     if (async_tx_test_ack(&desc->txd))
0781         return -EINTR;
0782 
0783     /*
0784      * On first invocation this shouldn't be necessary: the call to
0785      * ipu_init_channel_buffer() above will have set the addresses for us, so
0786      * we could make it conditional on status >= IPU_CHANNEL_ENABLED, but
0787      * doing it again shouldn't hurt either.
0788      */
0789     ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
0790 
0791     ipu_select_buffer(chan_id, buf_idx);
0792     dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
0793         sg, chan_id, buf_idx);
0794 
0795     return 0;
0796 }
0797 
0798 /* Called under spin_lock_irqsave(&ichan->lock) */
0799 static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
0800                       struct idmac_tx_desc *desc)
0801 {
0802     struct scatterlist *sg;
0803     int i, ret = 0;
0804 
0805     for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
0806         if (!ichan->sg[i]) {
0807             ichan->sg[i] = sg;
0808 
0809             ret = ipu_submit_buffer(ichan, desc, sg, i);
0810             if (ret < 0)
0811                 return ret;
0812 
0813             sg = sg_next(sg);
0814         }
0815     }
0816 
0817     return ret;
0818 }
0819 
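     /*
      * dmaengine tx_submit() callback: initialise the channel buffers if the
      * channel is not yet ready, queue the descriptor, fill any empty IDMAC
      * buffer slots and enable the channel if it is not running yet.
      */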
0820 static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
0821 {
0822     struct idmac_tx_desc *desc = to_tx_desc(tx);
0823     struct idmac_channel *ichan = to_idmac_chan(tx->chan);
0824     struct idmac *idmac = to_idmac(tx->chan->device);
0825     struct ipu *ipu = to_ipu(idmac);
0826     struct device *dev = &ichan->dma_chan.dev->device;
0827     dma_cookie_t cookie;
0828     unsigned long flags;
0829     int ret;
0830 
0831     /* Sanity check */
0832     if (!list_empty(&desc->list)) {
0833         /* The descriptor doesn't belong to the client */
0834         dev_err(dev, "Descriptor %p not prepared!\n", tx);
0835         return -EBUSY;
0836     }
0837 
0838     mutex_lock(&ichan->chan_mutex);
0839 
0840     async_tx_clear_ack(tx);
0841 
0842     if (ichan->status < IPU_CHANNEL_READY) {
0843         struct idmac_video_param *video = &ichan->params.video;
0844         /*
0845          * Initial buffer assignment - the first two sg-entries from
0846          * the descriptor will end up in the IDMAC buffers
0847          */
0848         dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
0849             sg_dma_address(&desc->sg[1]);
0850 
0851         WARN_ON(ichan->sg[0] || ichan->sg[1]);
0852 
0853         cookie = ipu_init_channel_buffer(ichan,
0854                          video->out_pixel_fmt,
0855                          video->out_width,
0856                          video->out_height,
0857                          video->out_stride,
0858                          IPU_ROTATE_NONE,
0859                          sg_dma_address(&desc->sg[0]),
0860                          dma_1);
0861         if (cookie < 0)
0862             goto out;
0863     }
0864 
0865     dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
0866 
0867     cookie = dma_cookie_assign(tx);
0868 
0869     /* ipu->lock can be taken under ichan->lock, but not vice versa */
0870     spin_lock_irqsave(&ichan->lock, flags);
0871 
0872     list_add_tail(&desc->list, &ichan->queue);
0873     /* submit_buffers() atomically verifies and fills empty sg slots */
0874     ret = ipu_submit_channel_buffers(ichan, desc);
0875 
0876     spin_unlock_irqrestore(&ichan->lock, flags);
0877 
0878     if (ret < 0) {
0879         cookie = ret;
0880         goto dequeue;
0881     }
0882 
0883     if (ichan->status < IPU_CHANNEL_ENABLED) {
0884         ret = ipu_enable_channel(idmac, ichan);
0885         if (ret < 0) {
0886             cookie = ret;
0887             goto dequeue;
0888         }
0889     }
0890 
0891     dump_idmac_reg(ipu);
0892 
0893 dequeue:
0894     if (cookie < 0) {
0895         spin_lock_irqsave(&ichan->lock, flags);
0896         list_del_init(&desc->list);
0897         spin_unlock_irqrestore(&ichan->lock, flags);
0898         tx->cookie = cookie;
0899         ichan->dma_chan.cookie = cookie;
0900     }
0901 
0902 out:
0903     mutex_unlock(&ichan->chan_mutex);
0904 
0905     return cookie;
0906 }
0907 
0908 /* Called with ichan->chan_mutex held */
0909 static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
0910 {
0911     struct idmac_tx_desc *desc =
0912         vmalloc(array_size(n, sizeof(struct idmac_tx_desc)));
0913     struct idmac *idmac = to_idmac(ichan->dma_chan.device);
0914 
0915     if (!desc)
0916         return -ENOMEM;
0917 
0918     /* No interrupts, just disable the tasklet for a moment */
0919     tasklet_disable(&to_ipu(idmac)->tasklet);
0920 
0921     ichan->n_tx_desc = n;
0922     ichan->desc = desc;
0923     INIT_LIST_HEAD(&ichan->queue);
0924     INIT_LIST_HEAD(&ichan->free_list);
0925 
0926     while (n--) {
0927         struct dma_async_tx_descriptor *txd = &desc->txd;
0928 
0929         memset(txd, 0, sizeof(*txd));
0930         dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
0931         txd->tx_submit      = idmac_tx_submit;
0932 
0933         list_add(&desc->list, &ichan->free_list);
0934 
0935         desc++;
0936     }
0937 
0938     tasklet_enable(&to_ipu(idmac)->tasklet);
0939 
0940     return 0;
0941 }
0942 
0943 /**
0944  * ipu_init_channel() - initialize an IPU channel.
0945  * @idmac:  IPU DMAC context.
0946  * @ichan:  pointer to the channel object.
0947  * @return      0 on success or negative error code on failure.
0948  */
0949 static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
0950 {
0951     union ipu_channel_param *params = &ichan->params;
0952     uint32_t ipu_conf;
0953     enum ipu_channel channel = ichan->dma_chan.chan_id;
0954     unsigned long flags;
0955     uint32_t reg;
0956     struct ipu *ipu = to_ipu(idmac);
0957     int ret = 0, n_desc = 0;
0958 
0959     dev_dbg(ipu->dev, "init channel = %d\n", channel);
0960 
0961     if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
0962         channel != IDMAC_IC_7)
0963         return -EINVAL;
0964 
0965     spin_lock_irqsave(&ipu->lock, flags);
0966 
0967     switch (channel) {
0968     case IDMAC_IC_7:
0969         n_desc = 16;
0970         reg = idmac_read_icreg(ipu, IC_CONF);
0971         idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
0972         break;
0973     case IDMAC_IC_0:
0974         n_desc = 16;
0975         reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
0976         idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
0977         ret = ipu_ic_init_prpenc(ipu, params, true);
0978         break;
0979     case IDMAC_SDC_0:
0980     case IDMAC_SDC_1:
0981         n_desc = 4;
0982         break;
0983     default:
0984         break;
0985     }
0986 
0987     ipu->channel_init_mask |= 1L << channel;
0988 
0989     /* Enable IPU sub module */
0990     ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
0991         ipu_channel_conf_mask(channel);
0992     idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
0993 
0994     spin_unlock_irqrestore(&ipu->lock, flags);
0995 
0996     if (n_desc && !ichan->desc)
0997         ret = idmac_desc_alloc(ichan, n_desc);
0998 
0999     dump_idmac_reg(ipu);
1000 
1001     return ret;
1002 }
1003 
1004 /**
1005  * ipu_uninit_channel() - uninitialize an IPU channel.
1006  * @idmac:  IPU DMAC context.
1007  * @ichan:  pointer to the channel object.
1008  */
1009 static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
1010 {
1011     enum ipu_channel channel = ichan->dma_chan.chan_id;
1012     unsigned long flags;
1013     uint32_t reg;
1014     unsigned long chan_mask = 1UL << channel;
1015     uint32_t ipu_conf;
1016     struct ipu *ipu = to_ipu(idmac);
1017 
1018     spin_lock_irqsave(&ipu->lock, flags);
1019 
1020     if (!(ipu->channel_init_mask & chan_mask)) {
1021         dev_err(ipu->dev, "Channel %d already uninitialized\n",
1022             channel);
1023         spin_unlock_irqrestore(&ipu->lock, flags);
1024         return;
1025     }
1026 
1027     /* Reset the double buffer */
1028     reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
1029     idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);
1030 
1031     ichan->sec_chan_en = false;
1032 
1033     switch (channel) {
1034     case IDMAC_IC_7:
1035         reg = idmac_read_icreg(ipu, IC_CONF);
1036         idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
1037                  IC_CONF);
1038         break;
1039     case IDMAC_IC_0:
1040         reg = idmac_read_icreg(ipu, IC_CONF);
1041         idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
1042                   IC_CONF);
1043         break;
1044     case IDMAC_SDC_0:
1045     case IDMAC_SDC_1:
1046     default:
1047         break;
1048     }
1049 
1050     ipu->channel_init_mask &= ~(1L << channel);
1051 
1052     ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
1053         ~ipu_channel_conf_mask(channel);
1054     idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
1055 
1056     spin_unlock_irqrestore(&ipu->lock, flags);
1057 
1058     ichan->n_tx_desc = 0;
1059     vfree(ichan->desc);
1060     ichan->desc = NULL;
1061 }
1062 
1063 /**
1064  * ipu_disable_channel() - disable an IPU channel.
1065  * @idmac:      IPU DMAC context.
1066  * @ichan:      channel object pointer.
1067  * @wait_for_stop:  flag to set whether to wait for channel end of frame or
1068  *          return immediately.
1069  * @return:     0 on success or negative error code on failure.
1070  */
1071 static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1072                    bool wait_for_stop)
1073 {
1074     enum ipu_channel channel = ichan->dma_chan.chan_id;
1075     struct ipu *ipu = to_ipu(idmac);
1076     uint32_t reg;
1077     unsigned long flags;
1078     unsigned long chan_mask = 1UL << channel;
1079     unsigned int timeout;
1080 
1081     if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
1082         timeout = 40;
1083         /* This wait always fails - related to the spurious IRQ problem */
1084         while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
1085                (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
1086             timeout--;
1087             msleep(10);
1088 
1089             if (!timeout) {
1090                 dev_dbg(ipu->dev,
1091                     "Warning: timeout waiting for channel %u to "
1092                     "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
1093                     "busy = 0x%08X, tstat = 0x%08X\n", channel,
1094                     idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
1095                     idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
1096                     idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
1097                     idmac_read_ipureg(ipu, IPU_TASKS_STAT));
1098                 break;
1099             }
1100         }
1101         dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
1102     }
1103     /* SDC BG and FG must be disabled before DMA is disabled */
1104     if (wait_for_stop && (channel == IDMAC_SDC_0 ||
1105                   channel == IDMAC_SDC_1)) {
1106         for (timeout = 5;
1107              timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
1108             msleep(5);
1109     }
1110 
1111     spin_lock_irqsave(&ipu->lock, flags);
1112 
1113     /* Disable IC task */
1114     ipu_ic_disable_task(ipu, channel);
1115 
1116     /* Disable DMA channel(s) */
1117     reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
1118     idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
1119 
1120     spin_unlock_irqrestore(&ipu->lock, flags);
1121 
1122     return 0;
1123 }
1124 
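     /*
      * Return the next scatterlist element to program: the next element of the
      * current descriptor, or the first element of the next queued descriptor
      * (updating *desc), or NULL if the queue is exhausted.
      */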
1125 static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
1126     struct idmac_tx_desc **desc, struct scatterlist *sg)
1127 {
1128     struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
1129 
1130     if (sgnew)
1131         /* next sg-element in this list */
1132         return sgnew;
1133 
1134     if ((*desc)->list.next == &ichan->queue)
1135         /* No more descriptors on the queue */
1136         return NULL;
1137 
1138     /* Fetch next descriptor */
1139     *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
1140     return (*desc)->sg;
1141 }
1142 
1143 /*
1144  * We have several possibilities here:
1145  * current BUF      next BUF
1146  *
1147  * not last sg      next not last sg
1148  * not last sg      next last sg
1149  * last sg          first sg from next descriptor
1150  * last sg          NULL
1151  *
1152  * Besides, the descriptor queue might be empty or not. We process all these
1153  * cases carefully.
1154  */
1155 static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1156 {
1157     struct idmac_channel *ichan = dev_id;
1158     struct device *dev = &ichan->dma_chan.dev->device;
1159     unsigned int chan_id = ichan->dma_chan.chan_id;
1160     struct scatterlist **sg, *sgnext, *sgnew = NULL;
1161     /* Next transfer descriptor */
1162     struct idmac_tx_desc *desc, *descnew;
1163     bool done = false;
1164     u32 ready0, ready1, curbuf, err;
1165     struct dmaengine_desc_callback cb;
1166 
1167     /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
1168 
1169     dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
1170 
1171     spin_lock(&ipu_data.lock);
1172 
1173     ready0  = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
1174     ready1  = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
1175     curbuf  = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1176     err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
1177 
1178     if (err & (1 << chan_id)) {
1179         idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
1180         spin_unlock(&ipu_data.lock);
1181         /*
1182          * Doing this
1183          * ichan->sg[0] = ichan->sg[1] = NULL;
1184          * you can force channel re-enable on the next tx_submit(), but
1185          * this is dirty - think about descriptors with multiple
1186          * sg elements.
1187          */
1188         dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
1189              chan_id, ready0, ready1, curbuf);
1190         return IRQ_HANDLED;
1191     }
1192     spin_unlock(&ipu_data.lock);
1193 
1194     /* Other interrupts do not interfere with this channel */
1195     spin_lock(&ichan->lock);
1196     if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
1197              (!ichan->active_buffer && (ready0 >> chan_id) & 1)
1198              )) {
1199         spin_unlock(&ichan->lock);
1200         dev_dbg(dev,
1201             "IRQ with active buffer still ready on channel %x, "
1202             "active %d, ready %x, %x!\n", chan_id,
1203             ichan->active_buffer, ready0, ready1);
1204         return IRQ_NONE;
1205     }
1206 
1207     if (unlikely(list_empty(&ichan->queue))) {
1208         ichan->sg[ichan->active_buffer] = NULL;
1209         spin_unlock(&ichan->lock);
1210         dev_err(dev,
1211             "IRQ without queued buffers on channel %x, active %d, "
1212             "ready %x, %x!\n", chan_id,
1213             ichan->active_buffer, ready0, ready1);
1214         return IRQ_NONE;
1215     }
1216 
1217     /*
1218      * active_buffer is a software flag: it shows which buffer we are
1219      * currently expecting back from the hardware; IDMAC should already be
1220      * processing the other buffer.
1221      */
1222     sg = &ichan->sg[ichan->active_buffer];
1223     sgnext = ichan->sg[!ichan->active_buffer];
1224 
1225     if (!*sg) {
1226         spin_unlock(&ichan->lock);
1227         return IRQ_HANDLED;
1228     }
1229 
1230     desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
1231     descnew = desc;
1232 
1233     dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
1234         irq, (u64)sg_dma_address(*sg),
1235         sgnext ? (u64)sg_dma_address(sgnext) : 0,
1236         ichan->active_buffer, curbuf);
1237 
1238     /* Find the descriptor of sgnext */
1239     sgnew = idmac_sg_next(ichan, &descnew, *sg);
1240     if (sgnext != sgnew)
1241         dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
1242 
1243     /*
1244      * If sgnext == NULL, sg must be the last element in a scatterlist and
1245      * the queue must be empty.
1246      */
1247     if (unlikely(!sgnext)) {
1248         if (!WARN_ON(sg_next(*sg)))
1249             dev_dbg(dev, "Underrun on channel %x\n", chan_id);
1250         ichan->sg[!ichan->active_buffer] = sgnew;
1251 
1252         if (unlikely(sgnew)) {
1253             ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
1254         } else {
1255             spin_lock(&ipu_data.lock);
1256             ipu_ic_disable_task(&ipu_data, chan_id);
1257             spin_unlock(&ipu_data.lock);
1258             ichan->status = IPU_CHANNEL_READY;
1259             /* Continue to check for complete descriptor */
1260         }
1261     }
1262 
1263     /* Calculate and submit the next sg element */
1264     sgnew = idmac_sg_next(ichan, &descnew, sgnew);
1265 
1266     if (unlikely(!sg_next(*sg)) || !sgnext) {
1267         /*
1268          * Last element in scatterlist done, remove from the queue,
1269          * _init for debugging
1270          */
1271         list_del_init(&desc->list);
1272         done = true;
1273     }
1274 
1275     *sg = sgnew;
1276 
1277     if (likely(sgnew) &&
1278         ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
1279         dmaengine_desc_get_callback(&descnew->txd, &cb);
1280 
1281         list_del_init(&descnew->list);
1282         spin_unlock(&ichan->lock);
1283 
1284         dmaengine_desc_callback_invoke(&cb, NULL);
1285         spin_lock(&ichan->lock);
1286     }
1287 
1288     /* Flip the active buffer - even if update above failed */
1289     ichan->active_buffer = !ichan->active_buffer;
1290     if (done)
1291         dma_cookie_complete(&desc->txd);
1292 
1293     dmaengine_desc_get_callback(&desc->txd, &cb);
1294 
1295     spin_unlock(&ichan->lock);
1296 
1297     if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
1298         dmaengine_desc_callback_invoke(&cb, NULL);
1299 
1300     return IRQ_HANDLED;
1301 }
1302 
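     /*
      * Garbage-collector tasklet: move descriptors that the client has acked
      * back onto the free list and forget any channel sg pointers that still
      * reference them.
      */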
1303 static void ipu_gc_tasklet(struct tasklet_struct *t)
1304 {
1305     struct ipu *ipu = from_tasklet(ipu, t, tasklet);
1306     int i;
1307 
1308     for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1309         struct idmac_channel *ichan = ipu->channel + i;
1310         struct idmac_tx_desc *desc;
1311         unsigned long flags;
1312         struct scatterlist *sg;
1313         int j, k;
1314 
1315         for (j = 0; j < ichan->n_tx_desc; j++) {
1316             desc = ichan->desc + j;
1317             spin_lock_irqsave(&ichan->lock, flags);
1318             if (async_tx_test_ack(&desc->txd)) {
1319                 list_move(&desc->list, &ichan->free_list);
1320                 for_each_sg(desc->sg, sg, desc->sg_len, k) {
1321                     if (ichan->sg[0] == sg)
1322                         ichan->sg[0] = NULL;
1323                     else if (ichan->sg[1] == sg)
1324                         ichan->sg[1] = NULL;
1325                 }
1326                 async_tx_clear_ack(&desc->txd);
1327             }
1328             spin_unlock_irqrestore(&ichan->lock, flags);
1329         }
1330     }
1331 }
1332 
1333 /* Allocate and initialise a transfer descriptor. */
1334 static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
1335         struct scatterlist *sgl, unsigned int sg_len,
1336         enum dma_transfer_direction direction, unsigned long tx_flags,
1337         void *context)
1338 {
1339     struct idmac_channel *ichan = to_idmac_chan(chan);
1340     struct idmac_tx_desc *desc = NULL;
1341     struct dma_async_tx_descriptor *txd = NULL;
1342     unsigned long flags;
1343 
1344     /* We can only handle these three channels so far */
1345     if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
1346         chan->chan_id != IDMAC_IC_7)
1347         return NULL;
1348 
1349     if (!is_slave_direction(direction)) {
1350         dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
1351         return NULL;
1352     }
1353 
1354     mutex_lock(&ichan->chan_mutex);
1355 
1356     spin_lock_irqsave(&ichan->lock, flags);
1357     if (!list_empty(&ichan->free_list)) {
1358         desc = list_entry(ichan->free_list.next,
1359                   struct idmac_tx_desc, list);
1360 
1361         list_del_init(&desc->list);
1362 
1363         desc->sg_len    = sg_len;
1364         desc->sg    = sgl;
1365         txd     = &desc->txd;
1366         txd->flags  = tx_flags;
1367     }
1368     spin_unlock_irqrestore(&ichan->lock, flags);
1369 
1370     mutex_unlock(&ichan->chan_mutex);
1371 
1372     tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);
1373 
1374     return txd;
1375 }
1376 
1377 /* Re-select the current buffer and re-activate the channel */
1378 static void idmac_issue_pending(struct dma_chan *chan)
1379 {
1380     struct idmac_channel *ichan = to_idmac_chan(chan);
1381     struct idmac *idmac = to_idmac(chan->device);
1382     struct ipu *ipu = to_ipu(idmac);
1383     unsigned long flags;
1384 
1385     /* This is not always needed, but doesn't hurt either */
1386     spin_lock_irqsave(&ipu->lock, flags);
1387     ipu_select_buffer(chan->chan_id, ichan->active_buffer);
1388     spin_unlock_irqrestore(&ipu->lock, flags);
1389 
1390     /*
1391      * We might need to perform some parts of the initialisation from
1392      * ipu_enable_channel(), but not all: we do not want to reset to buffer 0
1393      * and don't need to set the priority again, but re-enabling the task
1394      * and the channel might be a good idea.
1395      */
1396 }
1397 
1398 static int idmac_pause(struct dma_chan *chan)
1399 {
1400     struct idmac_channel *ichan = to_idmac_chan(chan);
1401     struct idmac *idmac = to_idmac(chan->device);
1402     struct ipu *ipu = to_ipu(idmac);
1403     struct list_head *list, *tmp;
1404     unsigned long flags;
1405 
1406     mutex_lock(&ichan->chan_mutex);
1407 
1408     spin_lock_irqsave(&ipu->lock, flags);
1409     ipu_ic_disable_task(ipu, chan->chan_id);
1410 
1411     /* Return all descriptors into "prepared" state */
1412     list_for_each_safe(list, tmp, &ichan->queue)
1413         list_del_init(list);
1414 
1415     ichan->sg[0] = NULL;
1416     ichan->sg[1] = NULL;
1417 
1418     spin_unlock_irqrestore(&ipu->lock, flags);
1419 
1420     ichan->status = IPU_CHANNEL_INITIALIZED;
1421 
1422     mutex_unlock(&ichan->chan_mutex);
1423 
1424     return 0;
1425 }
1426 
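     /*
      * Stop the channel and move all queued and prepared descriptors back to
      * the free list. Called with ichan->chan_mutex held.
      */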
1427 static int __idmac_terminate_all(struct dma_chan *chan)
1428 {
1429     struct idmac_channel *ichan = to_idmac_chan(chan);
1430     struct idmac *idmac = to_idmac(chan->device);
1431     struct ipu *ipu = to_ipu(idmac);
1432     unsigned long flags;
1433     int i;
1434 
1435     ipu_disable_channel(idmac, ichan,
1436                 ichan->status >= IPU_CHANNEL_ENABLED);
1437 
1438     tasklet_disable(&ipu->tasklet);
1439 
1440     /* ichan->queue is modified in ISR, have to spinlock */
1441     spin_lock_irqsave(&ichan->lock, flags);
1442     list_splice_init(&ichan->queue, &ichan->free_list);
1443 
1444     if (ichan->desc)
1445         for (i = 0; i < ichan->n_tx_desc; i++) {
1446             struct idmac_tx_desc *desc = ichan->desc + i;
1447             if (list_empty(&desc->list))
1448                 /* Descriptor was prepared, but not submitted */
1449                 list_add(&desc->list, &ichan->free_list);
1450 
1451             async_tx_clear_ack(&desc->txd);
1452         }
1453 
1454     ichan->sg[0] = NULL;
1455     ichan->sg[1] = NULL;
1456     spin_unlock_irqrestore(&ichan->lock, flags);
1457 
1458     tasklet_enable(&ipu->tasklet);
1459 
1460     ichan->status = IPU_CHANNEL_INITIALIZED;
1461 
1462     return 0;
1463 }
1464 
1465 static int idmac_terminate_all(struct dma_chan *chan)
1466 {
1467     struct idmac_channel *ichan = to_idmac_chan(chan);
1468     int ret;
1469 
1470     mutex_lock(&ichan->chan_mutex);
1471 
1472     ret = __idmac_terminate_all(chan);
1473 
1474     mutex_unlock(&ichan->chan_mutex);
1475 
1476     return ret;
1477 }
1478 
1479 #ifdef DEBUG
1480 static irqreturn_t ic_sof_irq(int irq, void *dev_id)
1481 {
1482     struct idmac_channel *ichan = dev_id;
1483     printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
1484            irq, ichan->dma_chan.chan_id);
1485     disable_irq_nosync(irq);
1486     return IRQ_HANDLED;
1487 }
1488 
1489 static irqreturn_t ic_eof_irq(int irq, void *dev_id)
1490 {
1491     struct idmac_channel *ichan = dev_id;
1492     printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
1493            irq, ichan->dma_chan.chan_id);
1494     disable_irq_nosync(irq);
1495     return IRQ_HANDLED;
1496 }
1497 
1498 static int ic_sof = -EINVAL, ic_eof = -EINVAL;
1499 #endif
1500 
1501 static int idmac_alloc_chan_resources(struct dma_chan *chan)
1502 {
1503     struct idmac_channel *ichan = to_idmac_chan(chan);
1504     struct idmac *idmac = to_idmac(chan->device);
1505     int ret;
1506 
1507     /* dmaengine.c now guarantees to only offer free channels */
1508     BUG_ON(chan->client_count > 1);
1509     WARN_ON(ichan->status != IPU_CHANNEL_FREE);
1510 
1511     dma_cookie_init(chan);
1512 
1513     ret = ipu_irq_map(chan->chan_id);
1514     if (ret < 0)
1515         goto eimap;
1516 
1517     ichan->eof_irq = ret;
1518 
1519     /*
1520      * It is important to first disable the channel, because someone,
1521      * e.g., the bootloader, may have used it before us.
1522      */
1523     ipu_disable_channel(idmac, ichan, true);
1524 
1525     ret = ipu_init_channel(idmac, ichan);
1526     if (ret < 0)
1527         goto eichan;
1528 
1529     ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
1530               ichan->eof_name, ichan);
1531     if (ret < 0)
1532         goto erirq;
1533 
1534 #ifdef DEBUG
1535     if (chan->chan_id == IDMAC_IC_7) {
1536         ic_sof = ipu_irq_map(69);
1537         if (ic_sof > 0) {
1538             ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
1539             if (ret)
1540                 dev_err(&chan->dev->device, "request irq failed for IC SOF\n");
1541         }
1542         ic_eof = ipu_irq_map(70);
1543         if (ic_eof > 0) {
1544             ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
1545             if (ret)
1546                 dev_err(&chan->dev->device, "request irq failed for IC EOF\n");
1547         }
1548     }
1549 #endif
1550 
1551     ichan->status = IPU_CHANNEL_INITIALIZED;
1552 
1553     dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
1554         chan->chan_id, ichan->eof_irq);
1555 
1556     return ret;
1557 
1558 erirq:
1559     ipu_uninit_channel(idmac, ichan);
1560 eichan:
1561     ipu_irq_unmap(chan->chan_id);
1562 eimap:
1563     return ret;
1564 }
1565 
1566 static void idmac_free_chan_resources(struct dma_chan *chan)
1567 {
1568     struct idmac_channel *ichan = to_idmac_chan(chan);
1569     struct idmac *idmac = to_idmac(chan->device);
1570 
1571     mutex_lock(&ichan->chan_mutex);
1572 
1573     __idmac_terminate_all(chan);
1574 
1575     if (ichan->status > IPU_CHANNEL_FREE) {
1576 #ifdef DEBUG
1577         if (chan->chan_id == IDMAC_IC_7) {
1578             if (ic_sof > 0) {
1579                 free_irq(ic_sof, ichan);
1580                 ipu_irq_unmap(69);
1581                 ic_sof = -EINVAL;
1582             }
1583             if (ic_eof > 0) {
1584                 free_irq(ic_eof, ichan);
1585                 ipu_irq_unmap(70);
1586                 ic_eof = -EINVAL;
1587             }
1588         }
1589 #endif
1590         free_irq(ichan->eof_irq, ichan);
1591         ipu_irq_unmap(chan->chan_id);
1592     }
1593 
1594     ichan->status = IPU_CHANNEL_FREE;
1595 
1596     ipu_uninit_channel(idmac, ichan);
1597 
1598     mutex_unlock(&ichan->chan_mutex);
1599 
1600     tasklet_schedule(&to_ipu(idmac)->tasklet);
1601 }
1602 
1603 static enum dma_status idmac_tx_status(struct dma_chan *chan,
1604                dma_cookie_t cookie, struct dma_tx_state *txstate)
1605 {
1606     return dma_cookie_status(chan, cookie, txstate);
1607 }
1608 
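     /* Set up the dmaengine device and its channels and register it */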
1609 static int __init ipu_idmac_init(struct ipu *ipu)
1610 {
1611     struct idmac *idmac = &ipu->idmac;
1612     struct dma_device *dma = &idmac->dma;
1613     int i;
1614 
1615     dma_cap_set(DMA_SLAVE, dma->cap_mask);
1616     dma_cap_set(DMA_PRIVATE, dma->cap_mask);
1617 
1618     /* Compulsory common fields */
1619     dma->dev                = ipu->dev;
1620     dma->device_alloc_chan_resources    = idmac_alloc_chan_resources;
1621     dma->device_free_chan_resources     = idmac_free_chan_resources;
1622     dma->device_tx_status           = idmac_tx_status;
1623     dma->device_issue_pending       = idmac_issue_pending;
1624 
1625     /* Compulsory for DMA_SLAVE fields */
1626     dma->device_prep_slave_sg       = idmac_prep_slave_sg;
1627     dma->device_pause           = idmac_pause;
1628     dma->device_terminate_all       = idmac_terminate_all;
1629 
1630     INIT_LIST_HEAD(&dma->channels);
1631     for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1632         struct idmac_channel *ichan = ipu->channel + i;
1633         struct dma_chan *dma_chan = &ichan->dma_chan;
1634 
1635         spin_lock_init(&ichan->lock);
1636         mutex_init(&ichan->chan_mutex);
1637 
1638         ichan->status       = IPU_CHANNEL_FREE;
1639         ichan->sec_chan_en  = false;
1640         snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
1641 
1642         dma_chan->device    = &idmac->dma;
1643         dma_cookie_init(dma_chan);
1644         dma_chan->chan_id   = i;
1645         list_add_tail(&dma_chan->device_node, &dma->channels);
1646     }
1647 
1648     idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
1649 
1650     return dma_async_device_register(&idmac->dma);
1651 }
1652 
1653 static void ipu_idmac_exit(struct ipu *ipu)
1654 {
1655     int i;
1656     struct idmac *idmac = &ipu->idmac;
1657 
1658     for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1659         struct idmac_channel *ichan = ipu->channel + i;
1660 
1661         idmac_terminate_all(&ichan->dma_chan);
1662     }
1663 
1664     dma_async_device_unregister(&idmac->dma);
1665 }
1666 
1667 /*****************************************************************************
1668  * IPU common probe / remove
1669  */
1670 
1671 static int __init ipu_probe(struct platform_device *pdev)
1672 {
1673     struct resource *mem_ipu, *mem_ic;
1674     int ret;
1675 
1676     spin_lock_init(&ipu_data.lock);
1677 
1678     mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1679     mem_ic  = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1680     if (!mem_ipu || !mem_ic)
1681         return -EINVAL;
1682 
1683     ipu_data.dev = &pdev->dev;
1684 
1685     platform_set_drvdata(pdev, &ipu_data);
1686 
1687     ret = platform_get_irq(pdev, 0);
1688     if (ret < 0)
1689         goto err_noirq;
1690 
1691     ipu_data.irq_fn = ret;
1692     ret = platform_get_irq(pdev, 1);
1693     if (ret < 0)
1694         goto err_noirq;
1695 
1696     ipu_data.irq_err = ret;
1697 
1698     dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
1699         ipu_data.irq_fn, ipu_data.irq_err);
1700 
1701     /* Remap IPU common registers */
1702     ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
1703     if (!ipu_data.reg_ipu) {
1704         ret = -ENOMEM;
1705         goto err_ioremap_ipu;
1706     }
1707 
1708     /* Remap Image Converter and Image DMA Controller registers */
1709     ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
1710     if (!ipu_data.reg_ic) {
1711         ret = -ENOMEM;
1712         goto err_ioremap_ic;
1713     }
1714 
1715     /* Get IPU clock */
1716     ipu_data.ipu_clk = clk_get(&pdev->dev, NULL);
1717     if (IS_ERR(ipu_data.ipu_clk)) {
1718         ret = PTR_ERR(ipu_data.ipu_clk);
1719         goto err_clk_get;
1720     }
1721 
1722     /* Make sure IPU HSP clock is running */
1723     clk_prepare_enable(ipu_data.ipu_clk);
1724 
1725     /* Disable all interrupts */
1726     idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
1727     idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
1728     idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
1729     idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
1730     idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);
1731 
1732     dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
1733         (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);
1734 
1735     ret = ipu_irq_attach_irq(&ipu_data, pdev);
1736     if (ret < 0)
1737         goto err_attach_irq;
1738 
1739     /* Initialize DMA engine */
1740     ret = ipu_idmac_init(&ipu_data);
1741     if (ret < 0)
1742         goto err_idmac_init;
1743 
1744     tasklet_setup(&ipu_data.tasklet, ipu_gc_tasklet);
1745 
1746     ipu_data.dev = &pdev->dev;
1747 
1748     dev_dbg(ipu_data.dev, "IPU initialized\n");
1749 
1750     return 0;
1751 
1752 err_idmac_init:
1753 err_attach_irq:
1754     ipu_irq_detach_irq(&ipu_data, pdev);
1755     clk_disable_unprepare(ipu_data.ipu_clk);
1756     clk_put(ipu_data.ipu_clk);
1757 err_clk_get:
1758     iounmap(ipu_data.reg_ic);
1759 err_ioremap_ic:
1760     iounmap(ipu_data.reg_ipu);
1761 err_ioremap_ipu:
1762 err_noirq:
1763     dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
1764     return ret;
1765 }
1766 
1767 static int ipu_remove(struct platform_device *pdev)
1768 {
1769     struct ipu *ipu = platform_get_drvdata(pdev);
1770 
1771     ipu_idmac_exit(ipu);
1772     ipu_irq_detach_irq(ipu, pdev);
1773     clk_disable_unprepare(ipu->ipu_clk);
1774     clk_put(ipu->ipu_clk);
1775     iounmap(ipu->reg_ic);
1776     iounmap(ipu->reg_ipu);
1777     tasklet_kill(&ipu->tasklet);
1778 
1779     return 0;
1780 }
1781 
1782 /*
1783  * We need two MEM resources - with IPU-common and Image Converter registers,
1784  * including PF_CONF and IDMAC_* registers, and two IRQs - function and error
1785  */
1786 static struct platform_driver ipu_platform_driver = {
1787     .driver = {
1788         .name   = "ipu-core",
1789     },
1790     .remove     = ipu_remove,
1791 };
1792 
1793 static int __init ipu_init(void)
1794 {
1795     return platform_driver_probe(&ipu_platform_driver, ipu_probe);
1796 }
1797 subsys_initcall(ipu_init);
1798 
1799 MODULE_DESCRIPTION("IPU core driver");
1800 MODULE_LICENSE("GPL v2");
1801 MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
1802 MODULE_ALIAS("platform:ipu-core");