0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
0004  * Copyright (c) 2020, Linaro Limited
0005  */
0006 
0007 #include <dt-bindings/dma/qcom-gpi.h>
0008 #include <linux/bitfield.h>
0009 #include <linux/dma-mapping.h>
0010 #include <linux/dmaengine.h>
0011 #include <linux/module.h>
0012 #include <linux/of_dma.h>
0013 #include <linux/platform_device.h>
0014 #include <linux/dma/qcom-gpi-dma.h>
0015 #include <linux/scatterlist.h>
0016 #include <linux/slab.h>
0017 #include "../dmaengine.h"
0018 #include "../virt-dma.h"
0019 
0020 #define TRE_TYPE_DMA        0x10
0021 #define TRE_TYPE_GO     0x20
0022 #define TRE_TYPE_CONFIG0    0x22
0023 
0024 /* TRE flags */
0025 #define TRE_FLAGS_CHAIN     BIT(0)
0026 #define TRE_FLAGS_IEOB      BIT(8)
0027 #define TRE_FLAGS_IEOT      BIT(9)
0028 #define TRE_FLAGS_BEI       BIT(10)
0029 #define TRE_FLAGS_LINK      BIT(11)
0030 #define TRE_FLAGS_TYPE      GENMASK(23, 16)
0031 
0032 /* SPI CONFIG0 WD0 */
0033 #define TRE_SPI_C0_WORD_SZ  GENMASK(4, 0)
0034 #define TRE_SPI_C0_LOOPBACK BIT(8)
0035 #define TRE_SPI_C0_CS       BIT(11)
0036 #define TRE_SPI_C0_CPHA     BIT(12)
0037 #define TRE_SPI_C0_CPOL     BIT(13)
0038 #define TRE_SPI_C0_TX_PACK  BIT(24)
0039 #define TRE_SPI_C0_RX_PACK  BIT(25)
0040 
0041 /* CONFIG0 WD2 */
0042 #define TRE_C0_CLK_DIV      GENMASK(11, 0)
0043 #define TRE_C0_CLK_SRC      GENMASK(19, 16)
0044 
0045 /* SPI GO WD0 */
0046 #define TRE_SPI_GO_CMD      GENMASK(4, 0)
0047 #define TRE_SPI_GO_CS       GENMASK(10, 8)
0048 #define TRE_SPI_GO_FRAG     BIT(26)
0049 
0050 /* GO WD2 */
0051 #define TRE_RX_LEN      GENMASK(23, 0)
0052 
0053 /* I2C Config0 WD0 */
0054 #define TRE_I2C_C0_TLOW     GENMASK(7, 0)
0055 #define TRE_I2C_C0_THIGH    GENMASK(15, 8)
0056 #define TRE_I2C_C0_TCYL     GENMASK(23, 16)
0057 #define TRE_I2C_C0_TX_PACK  BIT(24)
0058 #define TRE_I2C_C0_RX_PACK      BIT(25)
0059 
0060 /* I2C GO WD0 */
0061 #define TRE_I2C_GO_CMD          GENMASK(4, 0)
0062 #define TRE_I2C_GO_ADDR     GENMASK(14, 8)
0063 #define TRE_I2C_GO_STRETCH  BIT(26)
0064 
0065 /* DMA TRE */
0066 #define TRE_DMA_LEN     GENMASK(23, 0)
0067 
0068 /* Register offsets from gpi-top */
0069 #define GPII_n_CH_k_CNTXT_0_OFFS(n, k)  (0x20000 + (0x4000 * (n)) + (0x80 * (k)))
0070 #define GPII_n_CH_k_CNTXT_0_EL_SIZE GENMASK(31, 24)
0071 #define GPII_n_CH_k_CNTXT_0_CHSTATE GENMASK(23, 20)
0072 #define GPII_n_CH_k_CNTXT_0_ERIDX   GENMASK(18, 14)
0073 #define GPII_n_CH_k_CNTXT_0_DIR     BIT(3)
0074 #define GPII_n_CH_k_CNTXT_0_PROTO   GENMASK(2, 0)
0075 
0076 #define GPII_n_CH_k_CNTXT_0(el_size, erindex, dir, chtype_proto)  \
0077     (FIELD_PREP(GPII_n_CH_k_CNTXT_0_EL_SIZE, el_size)   | \
0078      FIELD_PREP(GPII_n_CH_k_CNTXT_0_ERIDX, erindex)     | \
0079      FIELD_PREP(GPII_n_CH_k_CNTXT_0_DIR, dir)       | \
0080      FIELD_PREP(GPII_n_CH_k_CNTXT_0_PROTO, chtype_proto))
0081 
0082 #define GPI_CHTYPE_DIR_IN   (0)
0083 #define GPI_CHTYPE_DIR_OUT  (1)
0084 
0085 #define GPI_CHTYPE_PROTO_GPI    (0x2)
0086 
0087 #define GPII_n_CH_k_DOORBELL_0_OFFS(n, k)   (0x22000 + (0x4000 * (n)) + (0x8 * (k)))
0088 #define GPII_n_CH_CMD_OFFS(n)           (0x23008 + (0x4000 * (n)))
0089 #define GPII_n_CH_CMD_OPCODE            GENMASK(31, 24)
0090 #define GPII_n_CH_CMD_CHID          GENMASK(7, 0)
0091 #define GPII_n_CH_CMD(opcode, chid)              \
0092              (FIELD_PREP(GPII_n_CH_CMD_OPCODE, opcode) | \
0093               FIELD_PREP(GPII_n_CH_CMD_CHID, chid))
0094 
0095 #define GPII_n_CH_CMD_ALLOCATE      (0)
0096 #define GPII_n_CH_CMD_START     (1)
0097 #define GPII_n_CH_CMD_STOP      (2)
0098 #define GPII_n_CH_CMD_RESET     (9)
0099 #define GPII_n_CH_CMD_DE_ALLOC      (10)
0100 #define GPII_n_CH_CMD_UART_SW_STALE (32)
0101 #define GPII_n_CH_CMD_UART_RFR_READY    (33)
0102 #define GPII_n_CH_CMD_UART_RFR_NOT_READY (34)
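/*
 * Worked example (illustrative, added for clarity): issuing a START command
 * on channel 1 composes the command word as
 *   GPII_n_CH_CMD(GPII_n_CH_CMD_START, 1)
 *     = FIELD_PREP(GENMASK(31, 24), 1) | FIELD_PREP(GENMASK(7, 0), 1)
 *     = 0x01000001
 * i.e. the opcode lands in bits 31:24 and the channel id in bits 7:0.
 */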
0103 
0104 /* EV Context Array */
0105 #define GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) (0x21000 + (0x4000 * (n)) + (0x80 * (k)))
0106 #define GPII_n_EV_k_CNTXT_0_EL_SIZE GENMASK(31, 24)
0107 #define GPII_n_EV_k_CNTXT_0_CHSTATE GENMASK(23, 20)
0108 #define GPII_n_EV_k_CNTXT_0_INTYPE  BIT(16)
0109 #define GPII_n_EV_k_CNTXT_0_CHTYPE  GENMASK(3, 0)
0110 
0111 #define GPII_n_EV_k_CNTXT_0(el_size, inttype, chtype)       \
0112     (FIELD_PREP(GPII_n_EV_k_CNTXT_0_EL_SIZE, el_size) | \
0113      FIELD_PREP(GPII_n_EV_k_CNTXT_0_INTYPE, inttype)  | \
0114      FIELD_PREP(GPII_n_EV_k_CNTXT_0_CHTYPE, chtype))
0115 
0116 #define GPI_INTTYPE_IRQ     (1)
0117 #define GPI_CHTYPE_GPI_EV   (0x2)
0118 
0119 enum CNTXT_OFFS {
0120     CNTXT_0_CONFIG = 0x0,
0121     CNTXT_1_R_LENGTH = 0x4,
0122     CNTXT_2_RING_BASE_LSB = 0x8,
0123     CNTXT_3_RING_BASE_MSB = 0xC,
0124     CNTXT_4_RING_RP_LSB = 0x10,
0125     CNTXT_5_RING_RP_MSB = 0x14,
0126     CNTXT_6_RING_WP_LSB = 0x18,
0127     CNTXT_7_RING_WP_MSB = 0x1C,
0128     CNTXT_8_RING_INT_MOD = 0x20,
0129     CNTXT_9_RING_INTVEC = 0x24,
0130     CNTXT_10_RING_MSI_LSB = 0x28,
0131     CNTXT_11_RING_MSI_MSB = 0x2C,
0132     CNTXT_12_RING_RP_UPDATE_LSB = 0x30,
0133     CNTXT_13_RING_RP_UPDATE_MSB = 0x34,
0134 };
0135 
0136 #define GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k)    (0x22100 + (0x4000 * (n)) + (0x8 * (k)))
0137 #define GPII_n_EV_CH_CMD_OFFS(n)        (0x23010 + (0x4000 * (n)))
0138 #define GPII_n_EV_CMD_OPCODE            GENMASK(31, 24)
0139 #define GPII_n_EV_CMD_CHID          GENMASK(7, 0)
0140 #define GPII_n_EV_CMD(opcode, chid)              \
0141              (FIELD_PREP(GPII_n_EV_CMD_OPCODE, opcode) | \
0142               FIELD_PREP(GPII_n_EV_CMD_CHID, chid))
0143 
0144 #define GPII_n_EV_CH_CMD_ALLOCATE       (0x00)
0145 #define GPII_n_EV_CH_CMD_RESET          (0x09)
0146 #define GPII_n_EV_CH_CMD_DE_ALLOC       (0x0A)
0147 
0148 #define GPII_n_CNTXT_TYPE_IRQ_OFFS(n)       (0x23080 + (0x4000 * (n)))
0149 
0150 /* mask type register */
0151 #define GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n)   (0x23088 + (0x4000 * (n)))
0152 #define GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK      GENMASK(6, 0)
0153 #define GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL   BIT(6)
0154 #define GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB      BIT(3)
0155 #define GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB      BIT(2)
0156 #define GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL   BIT(1)
0157 #define GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL   BIT(0)
0158 
0159 #define GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n)    (0x23090 + (0x4000 * (n)))
0160 #define GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n)  (0x23094 + (0x4000 * (n)))
0161 
0162 /* Mask channel control interrupt register */
0163 #define GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n) (0x23098 + (0x4000 * (n)))
0164 #define GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK    GENMASK(1, 0)
0165 
0166 /* Mask event control interrupt register */
0167 #define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n)  (0x2309C + (0x4000 * (n)))
0168 #define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK BIT(0)
0169 
0170 #define GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n) (0x230A0 + (0x4000 * (n)))
0171 #define GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n)  (0x230A4 + (0x4000 * (n)))
0172 
0173 /* Mask event interrupt register */
0174 #define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n)   (0x230B8 + (0x4000 * (n)))
0175 #define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK  BIT(0)
0176 
0177 #define GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n)   (0x230C0 + (0x4000 * (n)))
0178 #define GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n)  (0x23100 + (0x4000 * (n)))
0179 #define GPI_GLOB_IRQ_ERROR_INT_MSK      BIT(0)
0180 
0181 /* GPII specific Global - Enable bit register */
0182 #define GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n)    (0x23108 + (0x4000 * (n)))
0183 #define GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n)   (0x23110 + (0x4000 * (n)))
0184 #define GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n)  (0x23118 + (0x4000 * (n)))
0185 
0186 /* GPII general interrupt - Enable bit register */
0187 #define GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n)    (0x23120 + (0x4000 * (n)))
0188 #define GPII_n_CNTXT_GPII_IRQ_EN_BMSK       GENMASK(3, 0)
0189 
0190 #define GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n)   (0x23128 + (0x4000 * (n)))
0191 
0192 /* GPII Interrupt Type register */
0193 #define GPII_n_CNTXT_INTSET_OFFS(n)     (0x23180 + (0x4000 * (n)))
0194 #define GPII_n_CNTXT_INTSET_BMSK        BIT(0)
0195 
0196 #define GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n)   (0x23188 + (0x4000 * (n)))
0197 #define GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n)   (0x2318C + (0x4000 * (n)))
0198 #define GPII_n_CNTXT_SCRATCH_0_OFFS(n)      (0x23400 + (0x4000 * (n)))
0199 #define GPII_n_CNTXT_SCRATCH_1_OFFS(n)      (0x23404 + (0x4000 * (n)))
0200 
0201 #define GPII_n_ERROR_LOG_OFFS(n)        (0x23200 + (0x4000 * (n)))
0202 
0203 /* QOS Registers */
0204 #define GPII_n_CH_k_QOS_OFFS(n, k)      (0x2005C + (0x4000 * (n)) + (0x80 * (k)))
0205 
0206 /* Scratch registers */
0207 #define GPII_n_CH_k_SCRATCH_0_OFFS(n, k)    (0x20060 + (0x4000 * (n)) + (0x80 * (k)))
0208 #define GPII_n_CH_k_SCRATCH_0_SEID      GENMASK(2, 0)
0209 #define GPII_n_CH_k_SCRATCH_0_PROTO     GENMASK(7, 4)
0210 #define GPII_n_CH_k_SCRATCH_0_PAIR      GENMASK(20, 16)
0211 #define GPII_n_CH_k_SCRATCH_0(pair, proto, seid)        \
0212                  (FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PAIR, pair)  | \
0213                   FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PROTO, proto)    | \
0214                   FIELD_PREP(GPII_n_CH_k_SCRATCH_0_SEID, seid))
0215 #define GPII_n_CH_k_SCRATCH_1_OFFS(n, k)    (0x20064 + (0x4000 * (n)) + (0x80 * (k)))
0216 #define GPII_n_CH_k_SCRATCH_2_OFFS(n, k)    (0x20068 + (0x4000 * (n)) + (0x80 * (k)))
0217 #define GPII_n_CH_k_SCRATCH_3_OFFS(n, k)    (0x2006C + (0x4000 * (n)) + (0x80 * (k)))
0218 
0219 struct __packed gpi_tre {
0220     u32 dword[4];
0221 };
0222 
0223 enum msm_gpi_tce_code {
0224     MSM_GPI_TCE_SUCCESS = 1,
0225     MSM_GPI_TCE_EOT = 2,
0226     MSM_GPI_TCE_EOB = 4,
0227     MSM_GPI_TCE_UNEXP_ERR = 16,
0228 };
0229 
0230 #define CMD_TIMEOUT_MS      (250)
0231 
0232 #define MAX_CHANNELS_PER_GPII   (2)
0233 #define GPI_TX_CHAN     (0)
0234 #define GPI_RX_CHAN     (1)
0235 #define STATE_IGNORE        (U32_MAX)
0236 #define EV_FACTOR       (2)
0237 #define REQ_OF_DMA_ARGS     (5) /* # of arguments required from client */
0238 #define CHAN_TRES       64
0239 
0240 struct __packed xfer_compl_event {
0241     u64 ptr;
0242     u32 length:24;
0243     u8 code;
0244     u16 status;
0245     u8 type;
0246     u8 chid;
0247 };
0248 
0249 struct __packed immediate_data_event {
0250     u8 data_bytes[8];
0251     u8 length:4;
0252     u8 resvd:4;
0253     u16 tre_index;
0254     u8 code;
0255     u16 status;
0256     u8 type;
0257     u8 chid;
0258 };
0259 
0260 struct __packed qup_notif_event {
0261     u32 status;
0262     u32 time;
0263     u32 count:24;
0264     u8 resvd;
0265     u16 resvd1;
0266     u8 type;
0267     u8 chid;
0268 };
0269 
0270 struct __packed gpi_ere {
0271     u32 dword[4];
0272 };
0273 
0274 enum GPI_EV_TYPE {
0275     XFER_COMPLETE_EV_TYPE = 0x22,
0276     IMMEDIATE_DATA_EV_TYPE = 0x30,
0277     QUP_NOTIF_EV_TYPE = 0x31,
0278     STALE_EV_TYPE = 0xFF,
0279 };
0280 
0281 union __packed gpi_event {
0282     struct __packed xfer_compl_event xfer_compl_event;
0283     struct __packed immediate_data_event immediate_data_event;
0284     struct __packed qup_notif_event qup_notif_event;
0285     struct __packed gpi_ere gpi_ere;
0286 };
0287 
0288 enum gpii_irq_settings {
0289     DEFAULT_IRQ_SETTINGS,
0290     MASK_IEOB_SETTINGS,
0291 };
0292 
0293 enum gpi_ev_state {
0294     DEFAULT_EV_CH_STATE = 0,
0295     EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
0296     EV_STATE_ALLOCATED,
0297     MAX_EV_STATES
0298 };
0299 
0300 static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
0301     [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
0302     [EV_STATE_ALLOCATED] = "ALLOCATED",
0303 };
0304 
0305 #define TO_GPI_EV_STATE_STR(_state) (((_state) >= MAX_EV_STATES) ? \
0306                     "INVALID" : gpi_ev_state_str[(_state)])
0307 
0308 enum gpi_ch_state {
0309     DEFAULT_CH_STATE = 0x0,
0310     CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
0311     CH_STATE_ALLOCATED = 0x1,
0312     CH_STATE_STARTED = 0x2,
0313     CH_STATE_STOPPED = 0x3,
0314     CH_STATE_STOP_IN_PROC = 0x4,
0315     CH_STATE_ERROR = 0xf,
0316     MAX_CH_STATES
0317 };
0318 
0319 enum gpi_cmd {
0320     GPI_CH_CMD_BEGIN,
0321     GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
0322     GPI_CH_CMD_START,
0323     GPI_CH_CMD_STOP,
0324     GPI_CH_CMD_RESET,
0325     GPI_CH_CMD_DE_ALLOC,
0326     GPI_CH_CMD_UART_SW_STALE,
0327     GPI_CH_CMD_UART_RFR_READY,
0328     GPI_CH_CMD_UART_RFR_NOT_READY,
0329     GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
0330     GPI_EV_CMD_BEGIN,
0331     GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
0332     GPI_EV_CMD_RESET,
0333     GPI_EV_CMD_DEALLOC,
0334     GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
0335     GPI_MAX_CMD,
0336 };
0337 
0338 #define IS_CHAN_CMD(_cmd) ((_cmd) <= GPI_CH_CMD_END)
0339 
0340 static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
0341     [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
0342     [GPI_CH_CMD_START] = "CH START",
0343     [GPI_CH_CMD_STOP] = "CH STOP",
0344     [GPI_CH_CMD_RESET] = "CH_RESET",
0345     [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
0346     [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
0347     [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
0348     [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
0349     [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
0350     [GPI_EV_CMD_RESET] = "EV RESET",
0351     [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
0352 };
0353 
0354 #define TO_GPI_CMD_STR(_cmd) (((_cmd) >= GPI_MAX_CMD) ? "INVALID" : \
0355                   gpi_cmd_str[(_cmd)])
0356 
0357 /*
0358  * @DISABLE_STATE: no register access allowed
0359  * @CONFIG_STATE:  client has configured the channel
0360  * @PREPARE_HARDWARE: register access is allowed,
0361  *         but events are not yet processed
0362  * @ACTIVE_STATE: channels are fully operational
0363  * @PREPARE_TERMINATE: graceful termination of channels
0364  *             register access is allowed
0365  * @PAUSE_STATE: channels are active, but not processing any events
0366  */
0367 enum gpi_pm_state {
0368     DISABLE_STATE,
0369     CONFIG_STATE,
0370     PREPARE_HARDWARE,
0371     ACTIVE_STATE,
0372     PREPARE_TERMINATE,
0373     PAUSE_STATE,
0374     MAX_PM_STATE
0375 };
0376 
0377 #define REG_ACCESS_VALID(_pm_state) ((_pm_state) >= PREPARE_HARDWARE)
0378 
0379 static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
0380     [DISABLE_STATE] = "DISABLE",
0381     [CONFIG_STATE] = "CONFIG",
0382     [PREPARE_HARDWARE] = "PREPARE HARDWARE",
0383     [ACTIVE_STATE] = "ACTIVE",
0384     [PREPARE_TERMINATE] = "PREPARE TERMINATE",
0385     [PAUSE_STATE] = "PAUSE",
0386 };
0387 
0388 #define TO_GPI_PM_STR(_state) (((_state) >= MAX_PM_STATE) ? \
0389                   "INVALID" : gpi_pm_state_str[(_state)])
0390 
0391 static const struct {
0392     enum gpi_cmd gpi_cmd;
0393     u32 opcode;
0394     u32 state;
0395 } gpi_cmd_info[GPI_MAX_CMD] = {
0396     {
0397         GPI_CH_CMD_ALLOCATE,
0398         GPII_n_CH_CMD_ALLOCATE,
0399         CH_STATE_ALLOCATED,
0400     },
0401     {
0402         GPI_CH_CMD_START,
0403         GPII_n_CH_CMD_START,
0404         CH_STATE_STARTED,
0405     },
0406     {
0407         GPI_CH_CMD_STOP,
0408         GPII_n_CH_CMD_STOP,
0409         CH_STATE_STOPPED,
0410     },
0411     {
0412         GPI_CH_CMD_RESET,
0413         GPII_n_CH_CMD_RESET,
0414         CH_STATE_ALLOCATED,
0415     },
0416     {
0417         GPI_CH_CMD_DE_ALLOC,
0418         GPII_n_CH_CMD_DE_ALLOC,
0419         CH_STATE_NOT_ALLOCATED,
0420     },
0421     {
0422         GPI_CH_CMD_UART_SW_STALE,
0423         GPII_n_CH_CMD_UART_SW_STALE,
0424         STATE_IGNORE,
0425     },
0426     {
0427         GPI_CH_CMD_UART_RFR_READY,
0428         GPII_n_CH_CMD_UART_RFR_READY,
0429         STATE_IGNORE,
0430     },
0431     {
0432         GPI_CH_CMD_UART_RFR_NOT_READY,
0433         GPII_n_CH_CMD_UART_RFR_NOT_READY,
0434         STATE_IGNORE,
0435     },
0436     {
0437         GPI_EV_CMD_ALLOCATE,
0438         GPII_n_EV_CH_CMD_ALLOCATE,
0439         EV_STATE_ALLOCATED,
0440     },
0441     {
0442         GPI_EV_CMD_RESET,
0443         GPII_n_EV_CH_CMD_RESET,
0444         EV_STATE_ALLOCATED,
0445     },
0446     {
0447         GPI_EV_CMD_DEALLOC,
0448         GPII_n_EV_CH_CMD_DE_ALLOC,
0449         EV_STATE_NOT_ALLOCATED,
0450     },
0451 };
0452 
0453 struct gpi_ring {
0454     void *pre_aligned;
0455     size_t alloc_size;
0456     phys_addr_t phys_addr;
0457     dma_addr_t dma_handle;
0458     void *base;
0459     void *wp;
0460     void *rp;
0461     u32 len;
0462     u32 el_size;
0463     u32 elements;
0464     bool configured;
0465 };
0466 
0467 struct gpi_dev {
0468     struct dma_device dma_device;
0469     struct device *dev;
0470     struct resource *res;
0471     void __iomem *regs;
0472     void __iomem *ee_base; /* ee register base address */
0473     u32 max_gpii; /* maximum # of gpii instances available per gpi block */
0474     u32 gpii_mask; /* gpii instances available for apps */
0475     u32 ev_factor; /* ev ring length factor */
0476     struct gpii *gpiis;
0477 };
0478 
0479 struct reg_info {
0480     char *name;
0481     u32 offset;
0482     u32 val;
0483 };
0484 
0485 struct gchan {
0486     struct virt_dma_chan vc;
0487     u32 chid;
0488     u32 seid;
0489     u32 protocol;
0490     struct gpii *gpii;
0491     enum gpi_ch_state ch_state;
0492     enum gpi_pm_state pm_state;
0493     void __iomem *ch_cntxt_base_reg;
0494     void __iomem *ch_cntxt_db_reg;
0495     void __iomem *ch_cmd_reg;
0496     u32 dir;
0497     struct gpi_ring ch_ring;
0498     void *config;
0499 };
0500 
0501 struct gpii {
0502     u32 gpii_id;
0503     struct gchan gchan[MAX_CHANNELS_PER_GPII];
0504     struct gpi_dev *gpi_dev;
0505     int irq;
0506     void __iomem *regs; /* points to gpi top */
0507     void __iomem *ev_cntxt_base_reg;
0508     void __iomem *ev_cntxt_db_reg;
0509     void __iomem *ev_ring_rp_lsb_reg;
0510     void __iomem *ev_cmd_reg;
0511     void __iomem *ieob_clr_reg;
0512     struct mutex ctrl_lock;
0513     enum gpi_ev_state ev_state;
0514     bool configured_irq;
0515     enum gpi_pm_state pm_state;
0516     rwlock_t pm_lock;
0517     struct gpi_ring ev_ring;
0518     struct tasklet_struct ev_task; /* event processing tasklet */
0519     struct completion cmd_completion;
0520     enum gpi_cmd gpi_cmd;
0521     u32 cntxt_type_irq_msk;
0522     bool ieob_set;
0523 };
0524 
0525 #define MAX_TRE 3
0526 
0527 struct gpi_desc {
0528     struct virt_dma_desc vd;
0529     size_t len;
0530     void *db; /* DB register to program */
0531     struct gchan *gchan;
0532     struct gpi_tre tre[MAX_TRE];
0533     u32 num_tre;
0534 };
0535 
0536 static const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
0537     GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
0538 };
0539 
0540 static irqreturn_t gpi_handle_irq(int irq, void *data);
0541 static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
0542 static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
0543 static void gpi_process_events(struct gpii *gpii);
0544 
0545 static inline struct gchan *to_gchan(struct dma_chan *dma_chan)
0546 {
0547     return container_of(dma_chan, struct gchan, vc.chan);
0548 }
0549 
0550 static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
0551 {
0552     return container_of(vd, struct gpi_desc, vd);
0553 }
0554 
0555 static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
0556                       void *addr)
0557 {
0558     return ring->phys_addr + (addr - ring->base);
0559 }
0560 
0561 static inline void *to_virtual(const struct gpi_ring *const ring, phys_addr_t addr)
0562 {
0563     return ring->base + (addr - ring->phys_addr);
0564 }
0565 
0566 static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
0567 {
0568     return readl_relaxed(addr);
0569 }
0570 
0571 static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
0572 {
0573     writel_relaxed(val, addr);
0574 }
0575 
0576 /* gpi_write_reg_field - write to specific bit field */
0577 static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
0578                        u32 mask, u32 shift, u32 val)
0579 {
0580     u32 tmp = gpi_read_reg(gpii, addr);
0581 
0582     tmp &= ~mask;
0583     val = tmp | ((val << shift) & mask);
0584     gpi_write_reg(gpii, addr, val);
0585 }
0586 
0587 static __always_inline void
0588 gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
0589 {
0590     void __iomem *addr = gpii->regs + offset;
0591     u32 tmp = gpi_read_reg(gpii, addr);
0592 
0593     tmp &= ~mask;
0594     tmp |= u32_encode_bits(val, mask);
0595 
0596     gpi_write_reg(gpii, addr, tmp);
0597 }
0598 
0599 static void gpi_disable_interrupts(struct gpii *gpii)
0600 {
0601     gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
0602                GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, 0);
0603     gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
0604                GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK, 0);
0605     gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
0606                GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK, 0);
0607     gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
0608                GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK, 0);
0609     gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
0610                GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
0611     gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
0612                GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
0613     gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
0614                GPII_n_CNTXT_INTSET_BMSK, 0);
0615 
0616     gpii->cntxt_type_irq_msk = 0;
0617     devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
0618     gpii->configured_irq = false;
0619 }
0620 
0621 /* configure and enable interrupts */
0622 static int gpi_config_interrupts(struct gpii *gpii, enum gpii_irq_settings settings, bool mask)
0623 {
0624     const u32 enable = (GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
0625                   GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
0626                   GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
0627                   GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
0628                   GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
0629     int ret;
0630 
0631     if (!gpii->configured_irq) {
0632         ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
0633                        gpi_handle_irq, IRQF_TRIGGER_HIGH,
0634                        "gpi-dma", gpii);
0635         if (ret < 0) {
0636             dev_err(gpii->gpi_dev->dev, "error request irq:%d ret:%d\n",
0637                 gpii->irq, ret);
0638             return ret;
0639         }
0640     }
0641 
0642     if (settings == MASK_IEOB_SETTINGS) {
0643         /*
0644          * GPII only uses one EV ring per gpii so we can globally
0645          * enable/disable IEOB interrupt
0646          */
0647         if (mask)
0648             gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
0649         else
0650             gpii->cntxt_type_irq_msk &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
0651         gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
0652                    GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, gpii->cntxt_type_irq_msk);
0653     } else {
0654         gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
0655                    GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, enable);
0656         gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
0657                    GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
0658                    GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK);
0659         gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
0660                    GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
0661                    GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK);
0662         gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
0663                    GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
0664                    GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK);
0665         gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
0666                    GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
0667                    GPII_n_CNTXT_GPII_IRQ_EN_BMSK);
0668         gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
0669                    GPII_n_CNTXT_GPII_IRQ_EN_BMSK, GPII_n_CNTXT_GPII_IRQ_EN_BMSK);
0670         gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii->gpii_id), U32_MAX, 0);
0671         gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii->gpii_id), U32_MAX, 0);
0672         gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id), U32_MAX, 0);
0673         gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii->gpii_id), U32_MAX, 0);
0674         gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
0675                    GPII_n_CNTXT_INTSET_BMSK, 1);
0676         gpi_update_reg(gpii, GPII_n_ERROR_LOG_OFFS(gpii->gpii_id), U32_MAX, 0);
0677 
0678         gpii->cntxt_type_irq_msk = enable;
0679     }
0680 
0681     gpii->configured_irq = true;
0682     return 0;
0683 }
0684 
0685 /* Sends gpii event or channel command */
0686 static int gpi_send_cmd(struct gpii *gpii, struct gchan *gchan,
0687             enum gpi_cmd gpi_cmd)
0688 {
0689     u32 chid = MAX_CHANNELS_PER_GPII;
0690     unsigned long timeout;
0691     void __iomem *cmd_reg;
0692     u32 cmd;
0693 
0694     if (gpi_cmd >= GPI_MAX_CMD)
0695         return -EINVAL;
0696     if (IS_CHAN_CMD(gpi_cmd))
0697         chid = gchan->chid;
0698 
0699     dev_dbg(gpii->gpi_dev->dev,
0700         "sending cmd: %s:%u\n", TO_GPI_CMD_STR(gpi_cmd), chid);
0701 
0702     /* send opcode and wait for completion */
0703     reinit_completion(&gpii->cmd_completion);
0704     gpii->gpi_cmd = gpi_cmd;
0705 
0706     cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gchan->ch_cmd_reg : gpii->ev_cmd_reg;
0707     cmd = IS_CHAN_CMD(gpi_cmd) ? GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
0708                      GPII_n_EV_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
0709     gpi_write_reg(gpii, cmd_reg, cmd);
0710     timeout = wait_for_completion_timeout(&gpii->cmd_completion,
0711                           msecs_to_jiffies(CMD_TIMEOUT_MS));
0712     if (!timeout) {
0713         dev_err(gpii->gpi_dev->dev, "cmd: %s completion timeout:%u\n",
0714             TO_GPI_CMD_STR(gpi_cmd), chid);
0715         return -EIO;
0716     }
0717 
0718     /* if the cmd is a state change cmd, confirm the new ch state is correct */
0719     if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
0720         return 0;
0721 
0722     if (IS_CHAN_CMD(gpi_cmd) && gchan->ch_state == gpi_cmd_info[gpi_cmd].state)
0723         return 0;
0724 
0725     if (!IS_CHAN_CMD(gpi_cmd) && gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
0726         return 0;
0727 
0728     return -EIO;
0729 }
0730 
0731 /* program transfer ring DB register */
0732 static inline void gpi_write_ch_db(struct gchan *gchan,
0733                    struct gpi_ring *ring, void *wp)
0734 {
0735     struct gpii *gpii = gchan->gpii;
0736     phys_addr_t p_wp;
0737 
0738     p_wp = to_physical(ring, wp);
0739     gpi_write_reg(gpii, gchan->ch_cntxt_db_reg, p_wp);
0740 }
0741 
0742 /* program event ring DB register */
0743 static inline void gpi_write_ev_db(struct gpii *gpii,
0744                    struct gpi_ring *ring, void *wp)
0745 {
0746     phys_addr_t p_wp;
0747 
0748     p_wp = ring->phys_addr + (wp - ring->base);
0749     gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, p_wp);
0750 }
0751 
0752 /* process transfer completion interrupt */
0753 static void gpi_process_ieob(struct gpii *gpii)
0754 {
0755     gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
0756 
0757     gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
0758     tasklet_hi_schedule(&gpii->ev_task);
0759 }
0760 
0761 /* process channel control interrupt */
0762 static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
0763 {
0764     u32 gpii_id = gpii->gpii_id;
0765     u32 offset = GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
0766     u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
0767     struct gchan *gchan;
0768     u32 chid, state;
0769 
0770     /* clear the status */
0771     offset = GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
0772     gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
0773 
0774     for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
0775         if (!(BIT(chid) & ch_irq))
0776             continue;
0777 
0778         gchan = &gpii->gchan[chid];
0779         state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg +
0780                      CNTXT_0_CONFIG);
0781         state = FIELD_GET(GPII_n_CH_k_CNTXT_0_CHSTATE, state);
0782 
0783         /*
0784          * The CH_CMD_DE_ALLOC cmd is always successful. However, the cmd
0785          * does not change the hardware status, so overwrite the software
0786          * state with the default state.
0787          */
0788         if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
0789             state = DEFAULT_CH_STATE;
0790         gchan->ch_state = state;
0791 
0792         /*
0793          * Trigger complete_all() unless ch_state is stop-in-process.
0794          * Stop-in-process is a transition state and we will wait for the
0795          * stop interrupt before notifying.
0796          */
0797         if (gchan->ch_state != CH_STATE_STOP_IN_PROC)
0798             complete_all(&gpii->cmd_completion);
0799     }
0800 }
0801 
0802 /* processing gpi general error interrupts */
0803 static void gpi_process_gen_err_irq(struct gpii *gpii)
0804 {
0805     u32 gpii_id = gpii->gpii_id;
0806     u32 offset = GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id);
0807     u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
0808 
0809     /* log the status */
0810     dev_dbg(gpii->gpi_dev->dev, "irq_stts:0x%x\n", irq_stts);
0811 
0812     /* Clear the register */
0813     offset = GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id);
0814     gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
0815 }
0816 
0817 /* processing gpi level error interrupts */
0818 static void gpi_process_glob_err_irq(struct gpii *gpii)
0819 {
0820     u32 gpii_id = gpii->gpii_id;
0821     u32 offset = GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
0822     u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
0823 
0824     offset = GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
0825     gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
0826 
0827     /* only error interrupt should be set */
0828     if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
0829         dev_err(gpii->gpi_dev->dev, "invalid error status:0x%x\n", irq_stts);
0830         return;
0831     }
0832 
0833     offset = GPII_n_ERROR_LOG_OFFS(gpii_id);
0834     gpi_write_reg(gpii, gpii->regs + offset, 0);
0835 }
0836 
0837 /* gpii interrupt handler */
0838 static irqreturn_t gpi_handle_irq(int irq, void *data)
0839 {
0840     struct gpii *gpii = data;
0841     u32 gpii_id = gpii->gpii_id;
0842     u32 type, offset;
0843     unsigned long flags;
0844 
0845     read_lock_irqsave(&gpii->pm_lock, flags);
0846 
0847     /*
0848      * States are out of sync: an interrupt arrived while the software
0849      * state does not allow register access, so bail out.
0850      */
0851     if (!REG_ACCESS_VALID(gpii->pm_state)) {
0852         dev_err(gpii->gpi_dev->dev, "receive interrupt while in %s state\n",
0853             TO_GPI_PM_STR(gpii->pm_state));
0854         goto exit_irq;
0855     }
0856 
0857     offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
0858     type = gpi_read_reg(gpii, gpii->regs + offset);
0859 
0860     do {
0861         /* global gpii error */
0862         if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
0863             gpi_process_glob_err_irq(gpii);
0864             type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
0865         }
0866 
0867         /* transfer complete interrupt */
0868         if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
0869             gpi_process_ieob(gpii);
0870             type &= ~GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
0871         }
0872 
0873         /* event control irq */
0874         if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
0875             u32 ev_state;
0876             u32 ev_ch_irq;
0877 
0878             dev_dbg(gpii->gpi_dev->dev,
0879                 "processing EV CTRL interrupt\n");
0880             offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
0881             ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
0882 
0883             offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
0884                 (gpii_id);
0885             gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
0886             ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
0887                         CNTXT_0_CONFIG);
0888             ev_state = FIELD_GET(GPII_n_EV_k_CNTXT_0_CHSTATE, ev_state);
0889 
0890             /*
0891              * The EV_CMD_DEALLOC cmd is always successful. However, the
0892              * cmd does not change the hardware status, so overwrite the
0893              * software state with the default state.
0894              */
0895             if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
0896                 ev_state = DEFAULT_EV_CH_STATE;
0897 
0898             gpii->ev_state = ev_state;
0899             dev_dbg(gpii->gpi_dev->dev, "setting EV state to %s\n",
0900                 TO_GPI_EV_STATE_STR(gpii->ev_state));
0901             complete_all(&gpii->cmd_completion);
0902             type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
0903         }
0904 
0905         /* channel control irq */
0906         if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
0907             dev_dbg(gpii->gpi_dev->dev, "process CH CTRL interrupts\n");
0908             gpi_process_ch_ctrl_irq(gpii);
0909             type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
0910         }
0911 
0912         if (type) {
0913             dev_err(gpii->gpi_dev->dev, "Unhandled interrupt status:0x%x\n", type);
0914             gpi_process_gen_err_irq(gpii);
0915             goto exit_irq;
0916         }
0917 
0918         offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
0919         type = gpi_read_reg(gpii, gpii->regs + offset);
0920     } while (type);
0921 
0922 exit_irq:
0923     read_unlock_irqrestore(&gpii->pm_lock, flags);
0924 
0925     return IRQ_HANDLED;
0926 }
0927 
0928 /* process DMA Immediate completion data events */
0929 static void gpi_process_imed_data_event(struct gchan *gchan,
0930                     struct immediate_data_event *imed_event)
0931 {
0932     struct gpii *gpii = gchan->gpii;
0933     struct gpi_ring *ch_ring = &gchan->ch_ring;
0934     void *tre = ch_ring->base + (ch_ring->el_size * imed_event->tre_index);
0935     struct dmaengine_result result;
0936     struct gpi_desc *gpi_desc;
0937     struct virt_dma_desc *vd;
0938     unsigned long flags;
0939     u32 chid;
0940 
0941     /*
0942      * If the channel is not active, don't process the event
0943      */
0944     if (gchan->pm_state != ACTIVE_STATE) {
0945         dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
0946             TO_GPI_PM_STR(gchan->pm_state));
0947         return;
0948     }
0949 
0950     spin_lock_irqsave(&gchan->vc.lock, flags);
0951     vd = vchan_next_desc(&gchan->vc);
0952     if (!vd) {
0953         struct gpi_ere *gpi_ere;
0954         struct gpi_tre *gpi_tre;
0955 
0956         spin_unlock_irqrestore(&gchan->vc.lock, flags);
0957         dev_dbg(gpii->gpi_dev->dev, "event without a pending descriptor!\n");
0958         gpi_ere = (struct gpi_ere *)imed_event;
0959         dev_dbg(gpii->gpi_dev->dev,
0960             "Event: %08x %08x %08x %08x\n",
0961             gpi_ere->dword[0], gpi_ere->dword[1],
0962             gpi_ere->dword[2], gpi_ere->dword[3]);
0963         gpi_tre = tre;
0964         dev_dbg(gpii->gpi_dev->dev,
0965             "Pending TRE: %08x %08x %08x %08x\n",
0966             gpi_tre->dword[0], gpi_tre->dword[1],
0967             gpi_tre->dword[2], gpi_tre->dword[3]);
0968         return;
0969     }
0970     gpi_desc = to_gpi_desc(vd);
0971     spin_unlock_irqrestore(&gchan->vc.lock, flags);
0972 
0973     /*
0974      * The RP in the event points to the last TRE processed;
0975      * update the ring rp to tre + 1
0976      */
0977     tre += ch_ring->el_size;
0978     if (tre >= (ch_ring->base + ch_ring->len))
0979         tre = ch_ring->base;
0980     ch_ring->rp = tre;
0981 
0982     /* make sure rp updates are immediately visible to all cores */
0983     smp_wmb();
0984 
0985     chid = imed_event->chid;
0986     if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
0987         if (chid == GPI_RX_CHAN)
0988             goto gpi_free_desc;
0989         else
0990             return;
0991     }
0992 
0993     if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
0994         result.result = DMA_TRANS_ABORTED;
0995     else
0996         result.result = DMA_TRANS_NOERROR;
0997     result.residue = gpi_desc->len - imed_event->length;
0998 
0999     dma_cookie_complete(&vd->tx);
1000     dmaengine_desc_get_callback_invoke(&vd->tx, &result);
1001 
1002 gpi_free_desc:
1003     spin_lock_irqsave(&gchan->vc.lock, flags);
1004     list_del(&vd->node);
1005     spin_unlock_irqrestore(&gchan->vc.lock, flags);
1006     kfree(gpi_desc);
1007     gpi_desc = NULL;
1008 }
1009 
1010 /* processing transfer completion events */
1011 static void gpi_process_xfer_compl_event(struct gchan *gchan,
1012                      struct xfer_compl_event *compl_event)
1013 {
1014     struct gpii *gpii = gchan->gpii;
1015     struct gpi_ring *ch_ring = &gchan->ch_ring;
1016     void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
1017     struct virt_dma_desc *vd;
1018     struct gpi_desc *gpi_desc;
1019     struct dmaengine_result result;
1020     unsigned long flags;
1021     u32 chid;
1022 
1023     /* only process events on active channel */
1024     if (unlikely(gchan->pm_state != ACTIVE_STATE)) {
1025         dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
1026             TO_GPI_PM_STR(gchan->pm_state));
1027         return;
1028     }
1029 
1030     spin_lock_irqsave(&gchan->vc.lock, flags);
1031     vd = vchan_next_desc(&gchan->vc);
1032     if (!vd) {
1033         struct gpi_ere *gpi_ere;
1034 
1035         spin_unlock_irqrestore(&gchan->vc.lock, flags);
1036         dev_err(gpii->gpi_dev->dev, "Event without a pending descriptor!\n");
1037         gpi_ere = (struct gpi_ere *)compl_event;
1038         dev_err(gpii->gpi_dev->dev,
1039             "Event: %08x %08x %08x %08x\n",
1040             gpi_ere->dword[0], gpi_ere->dword[1],
1041             gpi_ere->dword[2], gpi_ere->dword[3]);
1042         return;
1043     }
1044 
1045     gpi_desc = to_gpi_desc(vd);
1046     spin_unlock_irqrestore(&gchan->vc.lock, flags);
1047 
1048     /*
1049      * The RP in the event points to the last TRE processed;
1050      * update the ring rp to ev_rp + 1
1051      */
1052     ev_rp += ch_ring->el_size;
1053     if (ev_rp >= (ch_ring->base + ch_ring->len))
1054         ev_rp = ch_ring->base;
1055     ch_ring->rp = ev_rp;
1056 
1057     /* update must be visible to other cores */
1058     smp_wmb();
1059 
1060     chid = compl_event->chid;
1061     if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
1062         if (chid == GPI_RX_CHAN)
1063             goto gpi_free_desc;
1064         else
1065             return;
1066     }
1067 
1068     if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR) {
1069         dev_err(gpii->gpi_dev->dev, "Error in Transaction\n");
1070         result.result = DMA_TRANS_ABORTED;
1071     } else {
1072         dev_dbg(gpii->gpi_dev->dev, "Transaction Success\n");
1073         result.result = DMA_TRANS_NOERROR;
1074     }
1075     result.residue = gpi_desc->len - compl_event->length;
1076     dev_dbg(gpii->gpi_dev->dev, "Residue %d\n", result.residue);
1077 
1078     dma_cookie_complete(&vd->tx);
1079     dmaengine_desc_get_callback_invoke(&vd->tx, &result);
1080 
1081 gpi_free_desc:
1082     spin_lock_irqsave(&gchan->vc.lock, flags);
1083     list_del(&vd->node);
1084     spin_unlock_irqrestore(&gchan->vc.lock, flags);
1085     kfree(gpi_desc);
1086     gpi_desc = NULL;
1087 }
1088 
1089 /* process all events */
1090 static void gpi_process_events(struct gpii *gpii)
1091 {
1092     struct gpi_ring *ev_ring = &gpii->ev_ring;
1093     phys_addr_t cntxt_rp;
1094     void *rp;
1095     union gpi_event *gpi_event;
1096     struct gchan *gchan;
1097     u32 chid, type;
1098 
1099     cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1100     rp = to_virtual(ev_ring, cntxt_rp);
1101 
1102     do {
1103         while (rp != ev_ring->rp) {
1104             gpi_event = ev_ring->rp;
1105             chid = gpi_event->xfer_compl_event.chid;
1106             type = gpi_event->xfer_compl_event.type;
1107 
1108             dev_dbg(gpii->gpi_dev->dev,
1109                 "Event: CHID:%u, type:%x %08x %08x %08x %08x\n",
1110                 chid, type, gpi_event->gpi_ere.dword[0],
1111                 gpi_event->gpi_ere.dword[1], gpi_event->gpi_ere.dword[2],
1112                 gpi_event->gpi_ere.dword[3]);
1113 
1114             switch (type) {
1115             case XFER_COMPLETE_EV_TYPE:
1116                 gchan = &gpii->gchan[chid];
1117                 gpi_process_xfer_compl_event(gchan,
1118                                  &gpi_event->xfer_compl_event);
1119                 break;
1120             case STALE_EV_TYPE:
1121                 dev_dbg(gpii->gpi_dev->dev, "stale event, not processing\n");
1122                 break;
1123             case IMMEDIATE_DATA_EV_TYPE:
1124                 gchan = &gpii->gchan[chid];
1125                 gpi_process_imed_data_event(gchan,
1126                                 &gpi_event->immediate_data_event);
1127                 break;
1128             case QUP_NOTIF_EV_TYPE:
1129                 dev_dbg(gpii->gpi_dev->dev, "QUP_NOTIF_EV_TYPE\n");
1130                 break;
1131             default:
1132                 dev_dbg(gpii->gpi_dev->dev,
1133                     "not supported event type:0x%x\n", type);
1134             }
1135             gpi_ring_recycle_ev_element(ev_ring);
1136         }
1137         gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1138 
1139         /* clear pending IEOB events */
1140         gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
1141 
1142         cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1143         rp = to_virtual(ev_ring, cntxt_rp);
1144 
1145     } while (rp != ev_ring->rp);
1146 }
1147 
1148 /* processing events using tasklet */
1149 static void gpi_ev_tasklet(unsigned long data)
1150 {
1151     struct gpii *gpii = (struct gpii *)data;
1152 
1153     read_lock_bh(&gpii->pm_lock);
1154     if (!REG_ACCESS_VALID(gpii->pm_state)) {
1155         read_unlock_bh(&gpii->pm_lock);
1156         dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
1157             TO_GPI_PM_STR(gpii->pm_state));
1158         return;
1159     }
1160 
1161     /* process the events */
1162     gpi_process_events(gpii);
1163 
1164     /* enable IEOB, switching back to interrupts */
1165     gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1166     read_unlock_bh(&gpii->pm_lock);
1167 }
1168 
1169 /* marks all pending events for the channel as stale */
1170 static void gpi_mark_stale_events(struct gchan *gchan)
1171 {
1172     struct gpii *gpii = gchan->gpii;
1173     struct gpi_ring *ev_ring = &gpii->ev_ring;
1174     u32 cntxt_rp, local_rp;
1175     void *ev_rp;
1176 
1177     cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1178 
1179     ev_rp = ev_ring->rp;
1180     local_rp = (u32)to_physical(ev_ring, ev_rp);
1181     while (local_rp != cntxt_rp) {
1182         union gpi_event *gpi_event = ev_rp;
1183         u32 chid = gpi_event->xfer_compl_event.chid;
1184 
1185         if (chid == gchan->chid)
1186             gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1187         ev_rp += ev_ring->el_size;
1188         if (ev_rp >= (ev_ring->base + ev_ring->len))
1189             ev_rp = ev_ring->base;
1190         cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1191         local_rp = (u32)to_physical(ev_ring, ev_rp);
1192     }
1193 }
1194 
1195 /* reset sw state and issue channel reset or de-alloc */
1196 static int gpi_reset_chan(struct gchan *gchan, enum gpi_cmd gpi_cmd)
1197 {
1198     struct gpii *gpii = gchan->gpii;
1199     struct gpi_ring *ch_ring = &gchan->ch_ring;
1200     unsigned long flags;
1201     LIST_HEAD(list);
1202     int ret;
1203 
1204     ret = gpi_send_cmd(gpii, gchan, gpi_cmd);
1205     if (ret) {
1206         dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1207             TO_GPI_CMD_STR(gpi_cmd), ret);
1208         return ret;
1209     }
1210 
1211     /* initialize the local ring ptrs */
1212     ch_ring->rp = ch_ring->base;
1213     ch_ring->wp = ch_ring->base;
1214 
1215     /* visible to other cores */
1216     smp_wmb();
1217 
1218     /* check event ring for any stale events */
1219     write_lock_irq(&gpii->pm_lock);
1220     gpi_mark_stale_events(gchan);
1221 
1222     /* remove all async descriptors */
1223     spin_lock_irqsave(&gchan->vc.lock, flags);
1224     vchan_get_all_descriptors(&gchan->vc, &list);
1225     spin_unlock_irqrestore(&gchan->vc.lock, flags);
1226     write_unlock_irq(&gpii->pm_lock);
1227     vchan_dma_desc_free_list(&gchan->vc, &list);
1228 
1229     return 0;
1230 }
1231 
1232 static int gpi_start_chan(struct gchan *gchan)
1233 {
1234     struct gpii *gpii = gchan->gpii;
1235     int ret;
1236 
1237     ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_START);
1238     if (ret) {
1239         dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1240             TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1241         return ret;
1242     }
1243 
1244     /* gpii CH is active now */
1245     write_lock_irq(&gpii->pm_lock);
1246     gchan->pm_state = ACTIVE_STATE;
1247     write_unlock_irq(&gpii->pm_lock);
1248 
1249     return 0;
1250 }
1251 
1252 static int gpi_stop_chan(struct gchan *gchan)
1253 {
1254     struct gpii *gpii = gchan->gpii;
1255     int ret;
1256 
1257     ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_STOP);
1258     if (ret) {
1259         dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1260             TO_GPI_CMD_STR(GPI_CH_CMD_STOP), ret);
1261         return ret;
1262     }
1263 
1264     return 0;
1265 }
1266 
1267 /* allocate and configure the transfer channel */
1268 static int gpi_alloc_chan(struct gchan *chan, bool send_alloc_cmd)
1269 {
1270     struct gpii *gpii = chan->gpii;
1271     struct gpi_ring *ring = &chan->ch_ring;
1272     int ret;
1273     u32 id = gpii->gpii_id;
1274     u32 chid = chan->chid;
1275     u32 pair_chid = !chid;
1276 
1277     if (send_alloc_cmd) {
1278         ret = gpi_send_cmd(gpii, chan, GPI_CH_CMD_ALLOCATE);
1279         if (ret) {
1280             dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1281                 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1282             return ret;
1283         }
1284     }
1285 
1286     gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_0_CONFIG,
1287               GPII_n_CH_k_CNTXT_0(ring->el_size, 0, chan->dir, GPI_CHTYPE_PROTO_GPI));
1288     gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_1_R_LENGTH, ring->len);
1289     gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_2_RING_BASE_LSB, ring->phys_addr);
1290     gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_3_RING_BASE_MSB,
1291               upper_32_bits(ring->phys_addr));
1292     gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1293               upper_32_bits(ring->phys_addr));
1294     gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
1295               GPII_n_CH_k_SCRATCH_0(pair_chid, chan->protocol, chan->seid));
1296     gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0);
1297     gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0);
1298     gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0);
1299     gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_QOS_OFFS(id, chid), 1);
1300 
1301     /* flush all the writes */
1302     wmb();
1303     return 0;
1304 }
1305 
1306 /* allocate and configure event ring */
1307 static int gpi_alloc_ev_chan(struct gpii *gpii)
1308 {
1309     struct gpi_ring *ring = &gpii->ev_ring;
1310     void __iomem *base = gpii->ev_cntxt_base_reg;
1311     int ret;
1312 
1313     ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1314     if (ret) {
1315         dev_err(gpii->gpi_dev->dev, "error with cmd:%s ret:%d\n",
1316             TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1317         return ret;
1318     }
1319 
1320     /* program event context */
1321     gpi_write_reg(gpii, base + CNTXT_0_CONFIG,
1322               GPII_n_EV_k_CNTXT_0(ring->el_size, GPI_INTTYPE_IRQ, GPI_CHTYPE_GPI_EV));
1323     gpi_write_reg(gpii, base + CNTXT_1_R_LENGTH, ring->len);
1324     gpi_write_reg(gpii, base + CNTXT_2_RING_BASE_LSB, lower_32_bits(ring->phys_addr));
1325     gpi_write_reg(gpii, base + CNTXT_3_RING_BASE_MSB, upper_32_bits(ring->phys_addr));
1326     gpi_write_reg(gpii, gpii->ev_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1327               upper_32_bits(ring->phys_addr));
1328     gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
1329     gpi_write_reg(gpii, base + CNTXT_10_RING_MSI_LSB, 0);
1330     gpi_write_reg(gpii, base + CNTXT_11_RING_MSI_MSB, 0);
1331     gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
1332     gpi_write_reg(gpii, base + CNTXT_12_RING_RP_UPDATE_LSB, 0);
1333     gpi_write_reg(gpii, base + CNTXT_13_RING_RP_UPDATE_MSB, 0);
1334 
1335     /* add events to ring */
1336     ring->wp = (ring->base + ring->len - ring->el_size);
1337 
1338     /* flush all the writes */
1339     wmb();
1340 
1341     /* gpii is active now */
1342     write_lock_irq(&gpii->pm_lock);
1343     gpii->pm_state = ACTIVE_STATE;
1344     write_unlock_irq(&gpii->pm_lock);
1345     gpi_write_ev_db(gpii, ring, ring->wp);
1346 
1347     return 0;
1348 }
1349 
1350 /* calculate # of ERE/TRE available to queue */
1351 static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1352 {
1353     int elements = 0;
1354 
1355     if (ring->wp < ring->rp) {
1356         elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1357     } else {
1358         elements = (ring->rp - ring->base) / ring->el_size;
1359         elements += ((ring->base + ring->len - ring->wp) / ring->el_size) - 1;
1360     }
1361 
1362     return elements;
1363 }
1364 
1365 static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1366 {
1367     if (gpi_ring_num_elements_avail(ring) <= 0)
1368         return -ENOMEM;
1369 
1370     *wp = ring->wp;
1371     ring->wp += ring->el_size;
1372     if (ring->wp  >= (ring->base + ring->len))
1373         ring->wp = ring->base;
1374 
1375     /* visible to other cores */
1376     smp_wmb();
1377 
1378     return 0;
1379 }
1380 
1381 static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1382 {
1383     /* Update the WP */
1384     ring->wp += ring->el_size;
1385     if (ring->wp  >= (ring->base + ring->len))
1386         ring->wp = ring->base;
1387 
1388     /* Update the RP */
1389     ring->rp += ring->el_size;
1390     if (ring->rp  >= (ring->base + ring->len))
1391         ring->rp = ring->base;
1392 
1393     /* visible to other cores */
1394     smp_wmb();
1395 }
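/*
 * Note (added for clarity): the driver is the consumer of the event ring, so
 * recycling an element advances rp (event consumed) and wp (slot handed back
 * to the hardware) together; the updated wp is later written to the event
 * ring doorbell in gpi_process_events().
 */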
1396 
1397 static void gpi_free_ring(struct gpi_ring *ring,
1398               struct gpii *gpii)
1399 {
1400     dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1401               ring->pre_aligned, ring->dma_handle);
1402     memset(ring, 0, sizeof(*ring));
1403 }
1404 
1405 /* allocate memory for transfer and event rings */
1406 static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
1407               u32 el_size, struct gpii *gpii)
1408 {
1409     u64 len = elements * el_size;
1410     int bit;
1411 
1412     /* ring len must be power of 2 */
1413     bit = find_last_bit((unsigned long *)&len, 32);
1414     if (((1 << bit) - 1) & len)
1415         bit++;
1416     len = 1 << bit;
1417     ring->alloc_size = (len + (len - 1));
1418     dev_dbg(gpii->gpi_dev->dev,
1419         "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
1420           elements, el_size, (elements * el_size), len,
1421           ring->alloc_size);
1422 
1423     ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1424                            ring->alloc_size,
1425                            &ring->dma_handle, GFP_KERNEL);
1426     if (!ring->pre_aligned) {
1427         dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
1428             ring->alloc_size);
1429         return -ENOMEM;
1430     }
1431 
1432     /* align the physical mem */
1433     ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1434     ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
1435     ring->rp = ring->base;
1436     ring->wp = ring->base;
1437     ring->len = len;
1438     ring->el_size = el_size;
1439     ring->elements = ring->len / ring->el_size;
1440     memset(ring->base, 0, ring->len);
1441     ring->configured = true;
1442 
1443     /* update to other cores */
1444     smp_wmb();
1445 
1446     dev_dbg(gpii->gpi_dev->dev,
1447         "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
1448         &ring->dma_handle, &ring->phys_addr, ring->len,
1449         ring->el_size, ring->elements);
1450 
1451     return 0;
1452 }
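/*
 * Worked example (illustrative, added for clarity): 16 elements of 16 bytes
 * give len = 256 (already a power of 2), so alloc_size = 256 + 255 = 511.
 * Over-allocating by len - 1 bytes guarantees that a len-aligned window of
 * len bytes exists inside the buffer regardless of the alignment returned by
 * dma_alloc_coherent(); ring->phys_addr and ring->base point to that window.
 */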
1453 
1454 /* copy tre into transfer ring */
1455 static void gpi_queue_xfer(struct gpii *gpii, struct gchan *gchan,
1456                struct gpi_tre *gpi_tre, void **wp)
1457 {
1458     struct gpi_tre *ch_tre;
1459     int ret;
1460 
1461     /* get next tre location we can copy */
1462     ret = gpi_ring_add_element(&gchan->ch_ring, (void **)&ch_tre);
1463     if (unlikely(ret)) {
1464         dev_err(gpii->gpi_dev->dev, "Error adding ring element to xfer ring\n");
1465         return;
1466     }
1467 
1468     /* copy the tre info */
1469     memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
1470     *wp = ch_tre;
1471 }
1472 
1473 /* reset and restart transfer channel */
1474 static int gpi_terminate_all(struct dma_chan *chan)
1475 {
1476     struct gchan *gchan = to_gchan(chan);
1477     struct gpii *gpii = gchan->gpii;
1478     int schid, echid, i;
1479     int ret = 0;
1480 
1481     mutex_lock(&gpii->ctrl_lock);
1482 
1483     /*
1484      * Treat both channels as a group if the protocol is not UART:
1485      * STOP, RESET, and START need to happen in lockstep
1486      */
1487     schid = (gchan->protocol == QCOM_GPI_UART) ? gchan->chid : 0;
1488     echid = (gchan->protocol == QCOM_GPI_UART) ? schid + 1 : MAX_CHANNELS_PER_GPII;
1489 
1490     /* stop the channel */
1491     for (i = schid; i < echid; i++) {
1492         gchan = &gpii->gchan[i];
1493 
1494         /* disable ch state so no more TRE processing */
1495         write_lock_irq(&gpii->pm_lock);
1496         gchan->pm_state = PREPARE_TERMINATE;
1497         write_unlock_irq(&gpii->pm_lock);
1498 
1499         /* send command to Stop the channel */
1500         ret = gpi_stop_chan(gchan);
1501     }
1502 
1503     /* reset the channels (clears any pending tre) */
1504     for (i = schid; i < echid; i++) {
1505         gchan = &gpii->gchan[i];
1506 
1507         ret = gpi_reset_chan(gchan, GPI_CH_CMD_RESET);
1508         if (ret) {
1509             dev_err(gpii->gpi_dev->dev, "Error resetting channel ret:%d\n", ret);
1510             goto terminate_exit;
1511         }
1512 
1513         /* reprogram channel CNTXT */
1514         ret = gpi_alloc_chan(gchan, false);
1515         if (ret) {
1516             dev_err(gpii->gpi_dev->dev, "Error alloc_channel ret:%d\n", ret);
1517             goto terminate_exit;
1518         }
1519     }
1520 
1521     /* restart the channels */
1522     for (i = schid; i < echid; i++) {
1523         gchan = &gpii->gchan[i];
1524 
1525         ret = gpi_start_chan(gchan);
1526         if (ret) {
1527             dev_err(gpii->gpi_dev->dev, "Error Starting Channel ret:%d\n", ret);
1528             goto terminate_exit;
1529         }
1530     }
1531 
1532 terminate_exit:
1533     mutex_unlock(&gpii->ctrl_lock);
1534     return ret;
1535 }
1536 
1537 /* pause dma transfer for all channels */
1538 static int gpi_pause(struct dma_chan *chan)
1539 {
1540     struct gchan *gchan = to_gchan(chan);
1541     struct gpii *gpii = gchan->gpii;
1542     int i, ret;
1543 
1544     mutex_lock(&gpii->ctrl_lock);
1545 
1546     /*
1547      * pause/resume are per gpii not per channel, so
1548      * client needs to call pause only once
1549      */
1550     if (gpii->pm_state == PAUSE_STATE) {
1551         dev_dbg(gpii->gpi_dev->dev, "channel is already paused\n");
1552         mutex_unlock(&gpii->ctrl_lock);
1553         return 0;
1554     }
1555 
1556     /* send stop command to stop the channels */
1557     for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
1558         ret = gpi_stop_chan(&gpii->gchan[i]);
1559         if (ret) {
1560             mutex_unlock(&gpii->ctrl_lock);
1561             return ret;
1562         }
1563     }
1564 
1565     disable_irq(gpii->irq);
1566 
1567     /* wait for the event tasklet to finish */
1568     tasklet_kill(&gpii->ev_task);
1569 
1570     write_lock_irq(&gpii->pm_lock);
1571     gpii->pm_state = PAUSE_STATE;
1572     write_unlock_irq(&gpii->pm_lock);
1573     mutex_unlock(&gpii->ctrl_lock);
1574 
1575     return 0;
1576 }
1577 
1578 /* resume dma transfer */
1579 static int gpi_resume(struct dma_chan *chan)
1580 {
1581     struct gchan *gchan = to_gchan(chan);
1582     struct gpii *gpii = gchan->gpii;
1583     int i, ret;
1584 
1585     mutex_lock(&gpii->ctrl_lock);
1586     if (gpii->pm_state == ACTIVE_STATE) {
1587         dev_dbg(gpii->gpi_dev->dev, "channel is already active\n");
1588         mutex_unlock(&gpii->ctrl_lock);
1589         return 0;
1590     }
1591 
1592     enable_irq(gpii->irq);
1593 
1594     /* send start command to start the channels */
1595     for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
1596         ret = gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_START);
1597         if (ret) {
1598             dev_err(gpii->gpi_dev->dev, "Error starting chan, ret:%d\n", ret);
1599             mutex_unlock(&gpii->ctrl_lock);
1600             return ret;
1601         }
1602     }
1603 
1604     write_lock_irq(&gpii->pm_lock);
1605     gpii->pm_state = ACTIVE_STATE;
1606     write_unlock_irq(&gpii->pm_lock);
1607     mutex_unlock(&gpii->ctrl_lock);
1608 
1609     return 0;
1610 }
1611 
1612 static void gpi_desc_free(struct virt_dma_desc *vd)
1613 {
1614     struct gpi_desc *gpi_desc = to_gpi_desc(vd);
1615 
1616     kfree(gpi_desc);
1617     gpi_desc = NULL;
1618 }
1619 
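     /*
      * Cache the client's dma_slave_config::peripheral_config blob per
      * channel; gpi_create_spi_tre()/gpi_create_i2c_tre() later interpret
      * it as a struct gpi_spi_config or struct gpi_i2c_config when the
      * TREs for a transfer are built.
      */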
1620 static int
1621 gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
1622 {
1623     struct gchan *gchan = to_gchan(chan);
1624 
1625     if (!config->peripheral_config)
1626         return -EINVAL;
1627 
1628     gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
1629     if (!gchan->config)
1630         return -ENOMEM;
1631 
1632     memcpy(gchan->config, config->peripheral_config, config->peripheral_size);
1633 
1634     return 0;
1635 }
1636 
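     /*
      * Build the TREs describing one I2C transfer: an optional CONFIG0 TRE
      * (timing/clock parameters), a GO TRE with the command and target
      * address for writes, and a DMA TRE pointing at the data buffer for
      * reads and single-message writes. Returns the number of TREs written.
      */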
1637 static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
1638                   struct scatterlist *sgl, enum dma_transfer_direction direction)
1639 {
1640     struct gpi_i2c_config *i2c = chan->config;
1641     struct device *dev = chan->gpii->gpi_dev->dev;
1642     unsigned int tre_idx = 0;
1643     dma_addr_t address;
1644     struct gpi_tre *tre;
1645     unsigned int i;
1646 
1647     /* first create config tre if applicable */
1648     if (i2c->set_config) {
1649         tre = &desc->tre[tre_idx];
1650         tre_idx++;
1651 
1652         tre->dword[0] = u32_encode_bits(i2c->low_count, TRE_I2C_C0_TLOW);
1653         tre->dword[0] |= u32_encode_bits(i2c->high_count, TRE_I2C_C0_THIGH);
1654         tre->dword[0] |= u32_encode_bits(i2c->cycle_count, TRE_I2C_C0_TCYL);
1655         tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_TX_PACK);
1656         tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_RX_PACK);
1657 
1658         tre->dword[1] = 0;
1659 
1660         tre->dword[2] = u32_encode_bits(i2c->clk_div, TRE_C0_CLK_DIV);
1661 
1662         tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
1663         tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
1664     }
1665 
1666     /* create the GO tre for Tx */
1667     if (i2c->op == I2C_WRITE) {
1668         tre = &desc->tre[tre_idx];
1669         tre_idx++;
1670 
1671         if (i2c->multi_msg)
1672             tre->dword[0] = u32_encode_bits(I2C_READ, TRE_I2C_GO_CMD);
1673         else
1674             tre->dword[0] = u32_encode_bits(i2c->op, TRE_I2C_GO_CMD);
1675 
1676         tre->dword[0] |= u32_encode_bits(i2c->addr, TRE_I2C_GO_ADDR);
1677         tre->dword[0] |= u32_encode_bits(i2c->stretch, TRE_I2C_GO_STRETCH);
1678 
1679         tre->dword[1] = 0;
1680         tre->dword[2] = u32_encode_bits(i2c->rx_len, TRE_RX_LEN);
1681 
1682         tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
1683 
1684         if (i2c->multi_msg)
1685             tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
1686         else
1687             tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
1688     }
1689 
1690     if (i2c->op == I2C_READ || i2c->multi_msg == false) {
1691         /* create the DMA TRE */
1692         tre = &desc->tre[tre_idx];
1693         tre_idx++;
1694 
1695         address = sg_dma_address(sgl);
1696         tre->dword[0] = lower_32_bits(address);
1697         tre->dword[1] = upper_32_bits(address);
1698 
1699         tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
1700 
1701         tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
1702         tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
1703     }
1704 
1705     for (i = 0; i < tre_idx; i++)
1706         dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
1707             desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]);
1708 
1709     return tre_idx;
1710 }
1711 
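     /*
      * Build the TREs describing one SPI transfer: for TX, an optional
      * CONFIG0 TRE (word size, clock and CS settings) and a GO TRE with the
      * command; both directions end with a DMA TRE pointing at the data
      * buffer. Returns the number of TREs written.
      */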
1712 static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
1713                   struct scatterlist *sgl, enum dma_transfer_direction direction)
1714 {
1715     struct gpi_spi_config *spi = chan->config;
1716     struct device *dev = chan->gpii->gpi_dev->dev;
1717     unsigned int tre_idx = 0;
1718     dma_addr_t address;
1719     struct gpi_tre *tre;
1720     unsigned int i;
1721 
1722     /* first create config tre if applicable */
1723     if (direction == DMA_MEM_TO_DEV && spi->set_config) {
1724         tre = &desc->tre[tre_idx];
1725         tre_idx++;
1726 
1727         tre->dword[0] = u32_encode_bits(spi->word_len, TRE_SPI_C0_WORD_SZ);
1728         tre->dword[0] |= u32_encode_bits(spi->loopback_en, TRE_SPI_C0_LOOPBACK);
1729         tre->dword[0] |= u32_encode_bits(spi->clock_pol_high, TRE_SPI_C0_CPOL);
1730         tre->dword[0] |= u32_encode_bits(spi->data_pol_high, TRE_SPI_C0_CPHA);
1731         tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_TX_PACK);
1732         tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_RX_PACK);
1733 
1734         tre->dword[1] = 0;
1735 
1736         tre->dword[2] = u32_encode_bits(spi->clk_div, TRE_C0_CLK_DIV);
1737         tre->dword[2] |= u32_encode_bits(spi->clk_src, TRE_C0_CLK_SRC);
1738 
1739         tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
1740         tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
1741     }
1742 
1743     /* create the GO tre for Tx */
1744     if (direction == DMA_MEM_TO_DEV) {
1745         tre = &desc->tre[tre_idx];
1746         tre_idx++;
1747 
1748         tre->dword[0] = u32_encode_bits(spi->fragmentation, TRE_SPI_GO_FRAG);
1749         tre->dword[0] |= u32_encode_bits(spi->cs, TRE_SPI_GO_CS);
1750         tre->dword[0] |= u32_encode_bits(spi->cmd, TRE_SPI_GO_CMD);
1751 
1752         tre->dword[1] = 0;
1753 
1754         tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
1755 
1756         tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
1757         if (spi->cmd == SPI_RX) {
1758             tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
1759         } else if (spi->cmd == SPI_TX) {
1760             tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
1761         } else { /* SPI_DUPLEX */
1762             tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
1763             tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
1764         }
1765     }
1766 
1767     /* create the dma tre */
1768     tre = &desc->tre[tre_idx];
1769     tre_idx++;
1770 
1771     address = sg_dma_address(sgl);
1772     tre->dword[0] = lower_32_bits(address);
1773     tre->dword[1] = upper_32_bits(address);
1774 
1775     tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
1776 
1777     tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
1778     if (direction == DMA_MEM_TO_DEV)
1779         tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
1780 
1781     for (i = 0; i < tre_idx; i++)
1782         dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
1783             desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]);
1784 
1785     return tre_idx;
1786 }
1787 
1788 /* build the TREs for the transfer and wrap them in a vchan descriptor */
1789 static struct dma_async_tx_descriptor *
1790 gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1791           unsigned int sg_len, enum dma_transfer_direction direction,
1792           unsigned long flags, void *context)
1793 {
1794     struct gchan *gchan = to_gchan(chan);
1795     struct gpii *gpii = gchan->gpii;
1796     struct device *dev = gpii->gpi_dev->dev;
1797     struct gpi_ring *ch_ring = &gchan->ch_ring;
1798     struct gpi_desc *gpi_desc;
1799     u32 nr, nr_tre = 0;
1800     u8 set_config;
1801     int i;
1802 
1803     gpii->ieob_set = false;
1804     if (!is_slave_direction(direction)) {
1805         dev_err(gpii->gpi_dev->dev, "invalid dma direction: %d\n", direction);
1806         return NULL;
1807     }
1808 
1809     if (sg_len > 1) {
1810         dev_err(dev, "Multiple sg entries sent, only one is supported: %d\n", sg_len);
1811         return NULL;
1812     }
1813 
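         /*
          * Worst case a TX transfer needs three TREs (CONFIG0 + GO + DMA),
          * or two when the first word of the stored peripheral config (the
          * set_config flag) is clear; an RX transfer only consumes the DMA
          * TRE.
          */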
1814     nr_tre = 3;
1815     set_config = *(u32 *)gchan->config;
1816     if (!set_config)
1817         nr_tre = 2;
1818     if (direction == DMA_DEV_TO_MEM) /* rx */
1819         nr_tre = 1;
1820 
1821     /* calculate # of elements required & available */
1822     nr = gpi_ring_num_elements_avail(ch_ring);
1823     if (nr < nr_tre) {
1824         dev_err(dev, "not enough space in ring, avail:%u required:%u\n", nr, nr_tre);
1825         return NULL;
1826     }
1827 
1828     gpi_desc = kzalloc(sizeof(*gpi_desc), GFP_NOWAIT);
1829     if (!gpi_desc)
1830         return NULL;
1831 
1832     /* create TREs for xfer */
1833     if (gchan->protocol == QCOM_GPI_SPI) {
1834         i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
1835     } else if (gchan->protocol == QCOM_GPI_I2C) {
1836         i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction);
1837     } else {
1838         dev_err(dev, "invalid peripheral: %d\n", gchan->protocol);
1839         kfree(gpi_desc);
1840         return NULL;
1841     }
1842 
1843     /* set up the descriptor */
1844     gpi_desc->gchan = gchan;
1845     gpi_desc->len = sg_dma_len(sgl);
1846     gpi_desc->num_tre  = i;
1847 
1848     return vchan_tx_prep(&gchan->vc, &gpi_desc->vd, flags);
1849 }
1850 
1851 /* ring the transfer ring doorbell to begin the transfer */
1852 static void gpi_issue_pending(struct dma_chan *chan)
1853 {
1854     struct gchan *gchan = to_gchan(chan);
1855     struct gpii *gpii = gchan->gpii;
1856     unsigned long flags, pm_lock_flags;
1857     struct virt_dma_desc *vd = NULL;
1858     struct gpi_desc *gpi_desc;
1859     struct gpi_ring *ch_ring = &gchan->ch_ring;
1860     void *tre, *wp = NULL;
1861     int i;
1862 
1863     read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
1864 
1865     /* move all submitted descriptors to the issued list */
1866     spin_lock_irqsave(&gchan->vc.lock, flags);
1867     if (vchan_issue_pending(&gchan->vc))
1868         vd = list_last_entry(&gchan->vc.desc_issued,
1869                      struct virt_dma_desc, node);
1870     spin_unlock_irqrestore(&gchan->vc.lock, flags);
1871 
1872     /* nothing to do, the issued list is empty */
1873     if (!vd) {
1874         read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
1875         return;
1876     }
1877 
1878     gpi_desc = to_gpi_desc(vd);
1879     for (i = 0; i < gpi_desc->num_tre; i++) {
1880         tre = &gpi_desc->tre[i];
1881         gpi_queue_xfer(gpii, gchan, tre, &wp);
1882     }
1883 
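         /*
          * Remember the ring write pointer after the last queued TRE and
          * ring the channel doorbell so the hardware starts processing the
          * newly queued TREs.
          */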
1884     gpi_desc->db = ch_ring->wp;
1885     gpi_write_ch_db(gchan, &gchan->ch_ring, gpi_desc->db);
1886     read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
1887 }
1888 
1889 static int gpi_ch_init(struct gchan *gchan)
1890 {
1891     struct gpii *gpii = gchan->gpii;
1892     const int ev_factor = gpii->gpi_dev->ev_factor;
1893     u32 elements;
1894     int i = 0, ret = 0;
1895 
1896     gchan->pm_state = CONFIG_STATE;
1897 
1898     /* check if both channels are configured before continuing */
1899     for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
1900         if (gpii->gchan[i].pm_state != CONFIG_STATE)
1901             goto exit_gpi_init;
1902 
1903     /* the protocol must be the same for both channels */
1904     if (gpii->gchan[0].protocol != gpii->gchan[1].protocol) {
1905         dev_err(gpii->gpi_dev->dev, "protocols do not match: %u != %u\n",
1906             gpii->gchan[0].protocol, gpii->gchan[1].protocol);
1907         ret = -EINVAL;
1908         goto exit_gpi_init;
1909     }
1910 
1911     /* allocate memory for event ring */
1912     elements = CHAN_TRES << ev_factor;
1913     ret = gpi_alloc_ring(&gpii->ev_ring, elements,
1914                  sizeof(union gpi_event), gpii);
1915     if (ret)
1916         goto exit_gpi_init;
1917 
1918     /* configure interrupts */
1919     write_lock_irq(&gpii->pm_lock);
1920     gpii->pm_state = PREPARE_HARDWARE;
1921     write_unlock_irq(&gpii->pm_lock);
1922     ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
1923     if (ret) {
1924         dev_err(gpii->gpi_dev->dev, "error configuring interrupts, ret:%d\n", ret);
1925         goto error_config_int;
1926     }
1927 
1928     /* allocate event rings */
1929     ret = gpi_alloc_ev_chan(gpii);
1930     if (ret) {
1931         dev_err(gpii->gpi_dev->dev, "error alloc_ev_chan:%d\n", ret);
1932         goto error_alloc_ev_ring;
1933     }
1934 
1935     /* Allocate all channels */
1936     for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
1937         ret = gpi_alloc_chan(&gpii->gchan[i], true);
1938         if (ret) {
1939             dev_err(gpii->gpi_dev->dev, "Error allocating chan:%d\n", ret);
1940             goto error_alloc_chan;
1941         }
1942     }
1943 
1944     /* start channels  */
1945     for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
1946         ret = gpi_start_chan(&gpii->gchan[i]);
1947         if (ret) {
1948             dev_err(gpii->gpi_dev->dev, "Error start chan:%d\n", ret);
1949             goto error_start_chan;
1950         }
1951     }
1952     return ret;
1953 
1954 error_start_chan:
1955     for (i = i - 1; i >= 0; i--) {
1956         gpi_stop_chan(&gpii->gchan[i]);
1957         gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_RESET);
1958     }
1959     i = MAX_CHANNELS_PER_GPII;
1960 error_alloc_chan:
1961     for (i = i - 1; i >= 0; i--)
1962         gpi_reset_chan(&gpii->gchan[i], GPI_CH_CMD_DE_ALLOC);
1963 error_alloc_ev_ring:
1964     gpi_disable_interrupts(gpii);
1965 error_config_int:
1966     gpi_free_ring(&gpii->ev_ring, gpii);
1967 exit_gpi_init:
1968     mutex_unlock(&gpii->ctrl_lock);
1969     return ret;
1970 }
1971 
1972 /* release all channel resources */
1973 static void gpi_free_chan_resources(struct dma_chan *chan)
1974 {
1975     struct gchan *gchan = to_gchan(chan);
1976     struct gpii *gpii = gchan->gpii;
1977     enum gpi_pm_state cur_state;
1978     int ret, i;
1979 
1980     mutex_lock(&gpii->ctrl_lock);
1981 
1982     cur_state = gchan->pm_state;
1983 
1984     /* disable ch state so no more TRE processing for this channel */
1985     write_lock_irq(&gpii->pm_lock);
1986     gchan->pm_state = PREPARE_TERMINATE;
1987     write_unlock_irq(&gpii->pm_lock);
1988 
1989     /* attempt to do graceful hardware shutdown */
1990     if (cur_state == ACTIVE_STATE) {
1991         gpi_stop_chan(gchan);
1992 
1993         ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
1994         if (ret)
1995             dev_err(gpii->gpi_dev->dev, "error resetting channel:%d\n", ret);
1996 
1997         gpi_reset_chan(gchan, GPI_CH_CMD_DE_ALLOC);
1998     }
1999 
2000     /* free all allocated memory */
2001     gpi_free_ring(&gchan->ch_ring, gpii);
2002     vchan_free_chan_resources(&gchan->vc);
2003     kfree(gchan->config);
2004 
2005     write_lock_irq(&gpii->pm_lock);
2006     gchan->pm_state = DISABLE_STATE;
2007     write_unlock_irq(&gpii->pm_lock);
2008 
2009     /* if any other channel ring is still active, exit */
2010     for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2011         if (gpii->gchan[i].ch_ring.configured)
2012             goto exit_free;
2013 
2014     /* deallocate EV Ring */
2015     cur_state = gpii->pm_state;
2016     write_lock_irq(&gpii->pm_lock);
2017     gpii->pm_state = PREPARE_TERMINATE;
2018     write_unlock_irq(&gpii->pm_lock);
2019 
2020     /* wait for the event tasklet to finish */
2021     tasklet_kill(&gpii->ev_task);
2022 
2023     /* send command to de-allocate the event ring */
2024     if (cur_state == ACTIVE_STATE)
2025         gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2026 
2027     gpi_free_ring(&gpii->ev_ring, gpii);
2028 
2029     /* disable interrupts */
2030     if (cur_state == ACTIVE_STATE)
2031         gpi_disable_interrupts(gpii);
2032 
2033     /* set final state to disable */
2034     write_lock_irq(&gpii->pm_lock);
2035     gpii->pm_state = DISABLE_STATE;
2036     write_unlock_irq(&gpii->pm_lock);
2037 
2038 exit_free:
2039     mutex_unlock(&gpii->ctrl_lock);
2040 }
2041 
2042 /* allocate channel resources */
2043 static int gpi_alloc_chan_resources(struct dma_chan *chan)
2044 {
2045     struct gchan *gchan = to_gchan(chan);
2046     struct gpii *gpii = gchan->gpii;
2047     int ret;
2048 
2049     mutex_lock(&gpii->ctrl_lock);
2050 
2051     /* allocate memory for transfer ring */
2052     ret = gpi_alloc_ring(&gchan->ch_ring, CHAN_TRES,
2053                  sizeof(struct gpi_tre), gpii);
2054     if (ret)
2055         goto xfer_alloc_err;
2056 
2057     ret = gpi_ch_init(gchan);
2058 
2059     mutex_unlock(&gpii->ctrl_lock);
2060 
2061     return ret;
2062 xfer_alloc_err:
2063     mutex_unlock(&gpii->ctrl_lock);
2064 
2065     return ret;
2066 }
2067 
2068 static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
2069 {
2070     struct gchan *tx_chan, *rx_chan;
2071     unsigned int gpii;
2072 
2073     /* check if same seid is already configured for another chid */
2074     for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2075         if (!((1 << gpii) & gpi_dev->gpii_mask))
2076             continue;
2077 
2078         tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
2079         rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
2080 
2081         if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
2082             return gpii;
2083         if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
2084             return gpii;
2085     }
2086 
2087     /* no channels configured with same seid, return next avail gpii */
2088     for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2089         if (!((1 << gpii) & gpi_dev->gpii_mask))
2090             continue;
2091 
2092         tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
2093         rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
2094 
2095         /* check if gpii is configured */
2096         if (tx_chan->vc.chan.client_count ||
2097             rx_chan->vc.chan.client_count)
2098             continue;
2099 
2100         /* found a free gpii */
2101         return gpii;
2102     }
2103 
2104     /* no gpii instance available to use */
2105     return -EIO;
2106 }
2107 
2108 /* gpi_of_dma_xlate: open client requested channel */
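     /*
      * The dma cell is expected to carry at least three arguments:
      * args[0] = channel id within the GPII (tx or rx), args[1] = serial
      * engine id (seid), args[2] = protocol to run on the channel pair
      * (e.g. QCOM_GPI_SPI or QCOM_GPI_I2C).
      */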
2109 static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2110                      struct of_dma *of_dma)
2111 {
2112     struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
2113     u32 seid, chid;
2114     int gpii;
2115     struct gchan *gchan;
2116 
2117     if (args->args_count < 3) {
2118         dev_err(gpi_dev->dev, "gpii requires a minimum of 3 args, client passed:%d args\n",
2119             args->args_count);
2120         return NULL;
2121     }
2122 
2123     chid = args->args[0];
2124     if (chid >= MAX_CHANNELS_PER_GPII) {
2125         dev_err(gpi_dev->dev, "gpii channel:%d not valid\n", chid);
2126         return NULL;
2127     }
2128 
2129     seid = args->args[1];
2130 
2131     /* find next available gpii to use */
2132     gpii = gpi_find_avail_gpii(gpi_dev, seid);
2133     if (gpii < 0) {
2134         dev_err(gpi_dev->dev, "no available gpii instances\n");
2135         return NULL;
2136     }
2137 
2138     gchan = &gpi_dev->gpiis[gpii].gchan[chid];
2139     if (gchan->vc.chan.client_count) {
2140         dev_err(gpi_dev->dev, "gpii:%d chid:%d seid:%d already configured\n",
2141             gpii, chid, gchan->seid);
2142         return NULL;
2143     }
2144 
2145     gchan->seid = seid;
2146     gchan->protocol = args->args[2];
2147 
2148     return dma_get_slave_channel(&gchan->vc.chan);
2149 }
2150 
2151 static int gpi_probe(struct platform_device *pdev)
2152 {
2153     struct gpi_dev *gpi_dev;
2154     unsigned int i;
2155     u32 ee_offset;
2156     int ret;
2157 
2158     gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2159     if (!gpi_dev)
2160         return -ENOMEM;
2161 
2162     gpi_dev->dev = &pdev->dev;
2163     gpi_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2164     gpi_dev->regs = devm_ioremap_resource(gpi_dev->dev, gpi_dev->res);
2165     if (IS_ERR(gpi_dev->regs))
2166         return PTR_ERR(gpi_dev->regs);
2167     gpi_dev->ee_base = gpi_dev->regs;
2168 
2169     ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channels",
2170                    &gpi_dev->max_gpii);
2171     if (ret) {
2172         dev_err(gpi_dev->dev, "missing 'dma-channels' DT property\n");
2173         return ret;
2174     }
2175 
2176     ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channel-mask",
2177                    &gpi_dev->gpii_mask);
2178     if (ret) {
2179         dev_err(gpi_dev->dev, "missing 'dma-channel-mask' DT property\n");
2180         return ret;
2181     }
2182 
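         /*
          * The per-SoC match data is an EE register offset (0x10000 or 0 in
          * gpi_of_match below); subtracting it from the mapped base lets the
          * GPII_n_* register offset macros resolve from a common base on
          * every supported SoC.
          */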
2183     ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev);
2184     gpi_dev->ee_base = gpi_dev->ee_base - ee_offset;
2185 
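         /* the event ring is sized CHAN_TRES << ev_factor in gpi_ch_init() */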
2186     gpi_dev->ev_factor = EV_FACTOR;
2187 
2188     ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
2189     if (ret) {
2190         dev_err(gpi_dev->dev, "Error setting dma_mask to 64, ret:%d\n", ret);
2191         return ret;
2192     }
2193 
2194     gpi_dev->gpiis = devm_kcalloc(gpi_dev->dev, gpi_dev->max_gpii,
2195                       sizeof(*gpi_dev->gpiis), GFP_KERNEL);
2196     if (!gpi_dev->gpiis)
2197         return -ENOMEM;
2198 
2199     /* setup all the supported gpii */
2200     INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2201     for (i = 0; i < gpi_dev->max_gpii; i++) {
2202         struct gpii *gpii = &gpi_dev->gpiis[i];
2203         int chan;
2204 
2205         if (!((1 << i) & gpi_dev->gpii_mask))
2206             continue;
2207 
2208         /* set up ev cntxt register map */
2209         gpii->ev_cntxt_base_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2210         gpii->ev_cntxt_db_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2211         gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg + CNTXT_4_RING_RP_LSB;
2212         gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i);
2213         gpii->ieob_clr_reg = gpi_dev->ee_base + GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2214 
2215         /* set up irq */
2216         ret = platform_get_irq(pdev, i);
2217         if (ret < 0)
2218             return ret;
2219         gpii->irq = ret;
2220 
2221         /* set up channel specific register info */
2222         for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2223             struct gchan *gchan = &gpii->gchan[chan];
2224 
2225             /* set up ch cntxt register map */
2226             gchan->ch_cntxt_base_reg = gpi_dev->ee_base +
2227                 GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2228             gchan->ch_cntxt_db_reg = gpi_dev->ee_base +
2229                 GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2230             gchan->ch_cmd_reg = gpi_dev->ee_base + GPII_n_CH_CMD_OFFS(i);
2231 
2232             /* vchan setup */
2233             vchan_init(&gchan->vc, &gpi_dev->dma_device);
2234             gchan->vc.desc_free = gpi_desc_free;
2235             gchan->chid = chan;
2236             gchan->gpii = gpii;
2237             gchan->dir = GPII_CHAN_DIR[chan];
2238         }
2239         mutex_init(&gpii->ctrl_lock);
2240         rwlock_init(&gpii->pm_lock);
2241         tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2242                  (unsigned long)gpii);
2243         init_completion(&gpii->cmd_completion);
2244         gpii->gpii_id = i;
2245         gpii->regs = gpi_dev->ee_base;
2246         gpii->gpi_dev = gpi_dev;
2247     }
2248 
2249     platform_set_drvdata(pdev, gpi_dev);
2250 
2251     /* clear and set DMA capabilities */
2252     dma_cap_zero(gpi_dev->dma_device.cap_mask);
2253     dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2254 
2255     /* configure dmaengine apis */
2256     gpi_dev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2257     gpi_dev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2258     gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2259     gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2260     gpi_dev->dma_device.device_alloc_chan_resources = gpi_alloc_chan_resources;
2261     gpi_dev->dma_device.device_free_chan_resources = gpi_free_chan_resources;
2262     gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2263     gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2264     gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2265     gpi_dev->dma_device.device_config = gpi_peripheral_config;
2266     gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2267     gpi_dev->dma_device.dev = gpi_dev->dev;
2268     gpi_dev->dma_device.device_pause = gpi_pause;
2269     gpi_dev->dma_device.device_resume = gpi_resume;
2270 
2271     /* register with dmaengine framework */
2272     ret = dma_async_device_register(&gpi_dev->dma_device);
2273     if (ret) {
2274         dev_err(gpi_dev->dev, "async_device_register failed ret:%d\n", ret);
2275         return ret;
2276     }
2277 
2278     ret = of_dma_controller_register(gpi_dev->dev->of_node,
2279                      gpi_of_dma_xlate, gpi_dev);
2280     if (ret) {
2281         dev_err(gpi_dev->dev, "of_dma_controller_register failed ret:%d\n", ret);
2282         return ret;
2283     }
2284 
2285     return ret;
2286 }
2287 
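     /* .data is the per-SoC EE register offset applied to ee_base in gpi_probe() */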
2288 static const struct of_device_id gpi_of_match[] = {
2289     { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
2290     { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
2291     { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
2292     { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
2293     { .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
2294     { .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 },
2295     { },
2296 };
2297 MODULE_DEVICE_TABLE(of, gpi_of_match);
2298 
2299 static struct platform_driver gpi_driver = {
2300     .probe = gpi_probe,
2301     .driver = {
2302         .name = KBUILD_MODNAME,
2303         .of_match_table = gpi_of_match,
2304     },
2305 };
2306 
2307 static int __init gpi_init(void)
2308 {
2309     return platform_driver_register(&gpi_driver);
2310 }
2311 subsys_initcall(gpi_init)
2312 
2313 MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2314 MODULE_LICENSE("GPL v2");