0001 // SPDX-License-Identifier: GPL-2.0
0002 
0003 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
0004  * Copyright (C) 2018-2021 Linaro Ltd.
0005  */
0006 
0007 #include <linux/types.h>
0008 #include <linux/bits.h>
0009 #include <linux/bitfield.h>
0010 #include <linux/mutex.h>
0011 #include <linux/completion.h>
0012 #include <linux/io.h>
0013 #include <linux/bug.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/platform_device.h>
0016 #include <linux/netdevice.h>
0017 
0018 #include "gsi.h"
0019 #include "gsi_reg.h"
0020 #include "gsi_private.h"
0021 #include "gsi_trans.h"
0022 #include "ipa_gsi.h"
0023 #include "ipa_data.h"
0024 #include "ipa_version.h"
0025 
0026 /**
0027  * DOC: The IPA Generic Software Interface
0028  *
0029  * The generic software interface (GSI) is an integral component of the IPA,
0030  * providing a well-defined communication layer between the AP subsystem
0031  * and the IPA core.  The modem uses the GSI layer as well.
0032  *
0033  *    --------             ---------
0034  *    |      |             |       |
0035  *    |  AP  +<---.   .----+ Modem |
0036  *    |      +--. |   | .->+       |
0037  *    |      |  | |   | |  |       |
0038  *    --------  | |   | |  ---------
0039  *              v |   v |
0040  *            --+-+---+-+--
0041  *            |    GSI    |
0042  *            |-----------|
0043  *            |           |
0044  *            |    IPA    |
0045  *            |           |
0046  *            -------------
0047  *
0048  * In the above diagram, the AP and Modem represent "execution environments"
0049  * (EEs), which are independent operating environments that use the IPA for
0050  * data transfer.
0051  *
0052  * Each EE uses a set of unidirectional GSI "channels," which allow transfer
0053  * of data to or from the IPA.  A channel is implemented as a ring buffer,
0054  * with a DRAM-resident array of "transfer elements" (TREs) available to
0055  * describe transfers to or from other EEs through the IPA.  A transfer
0056  * element can also contain an immediate command, requesting the IPA perform
0057  * actions other than data transfer.
0058  *
0059  * Each TRE refers to a block of data--also located in DRAM.  After writing one
0060  * or more TREs to a channel, the writer (either the IPA or an EE) writes a
0061  * doorbell register to inform the receiving side how many elements have
0062  * been written.
0063  *
0064  * Each channel has a GSI "event ring" associated with it.  An event ring
0065  * is implemented very much like a channel ring, but is always directed from
0066  * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
0067  * events by adding an entry to the event ring associated with the channel.
0068  * The GSI then writes its doorbell for the event ring, causing the target
0069  * EE to be interrupted.  Each entry in an event ring contains a pointer
0070  * to the channel TRE whose completion the event represents.
0071  *
0072  * Each TRE in a channel ring has a set of flags.  One flag indicates whether
0073  * the completion of the transfer operation generates an entry (and possibly
0074  * an interrupt) in the channel's event ring.  Other flags allow transfer
0075  * elements to be chained together, forming a single logical transaction.
0076  * TRE flags are used to control whether and when interrupts are generated
0077  * to signal completion of channel transfers.
0078  *
0079  * Elements in channel and event rings are completed (or consumed) strictly
0080  * in order.  Completion of one entry implies the completion of all preceding
0081  * entries.  A single completion interrupt can therefore communicate the
0082  * completion of many transfers.
0083  *
0084  * Note that all GSI registers are little-endian, which is the assumed
0085  * endianness of I/O space accesses.  The accessor functions perform byte
0086  * swapping if needed (i.e., for a big endian CPU).
0087  */
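/* Editorial sketch (not part of the driver): the shape of the TRE flow
 * described above.  fill_tre(), buf_addr, and buf_len are hypothetical;
 * real TRE construction is done by the transaction code in gsi_trans.c.
 */
#if 0
	/* Describe one transfer in the next free channel ring element */
	fill_tre(gsi_ring_virt(&channel->tre_ring, channel->tre_ring.index),
		 buf_addr, buf_len);
	channel->tre_ring.index++;

	/* Ring the channel doorbell so hardware sees the new element */
	gsi_channel_doorbell(channel);

	/* Later, the IPA posts a gsi_event pointing back at that TRE to the
	 * channel's event ring and interrupts the AP; gsi_isr_ieob() then
	 * schedules NAPI polling (gsi_channel_poll()) to complete it.
	 */
#endif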
0088 
0089 /* Delay period for interrupt moderation (in 32 kHz IPA internal timer ticks) */
0090 #define GSI_EVT_RING_INT_MODT       (32 * 1) /* 1 ms under 32 kHz clock */
0091 
0092 #define GSI_CMD_TIMEOUT         50  /* milliseconds */
0093 
0094 #define GSI_CHANNEL_STOP_RETRIES    10
0095 #define GSI_CHANNEL_MODEM_HALT_RETRIES  10
0096 #define GSI_CHANNEL_MODEM_FLOW_RETRIES  5   /* disable flow control only */
0097 
0098 #define GSI_MHI_EVENT_ID_START      10  /* 1st reserved event id */
0099 #define GSI_MHI_EVENT_ID_END        16  /* Last reserved event id */
0100 
0101 #define GSI_ISR_MAX_ITER        50  /* Detect interrupt storms */
0102 
0103 /* An entry in an event ring */
0104 struct gsi_event {
0105     __le64 xfer_ptr;
0106     __le16 len;
0107     u8 reserved1;
0108     u8 code;
0109     __le16 reserved2;
0110     u8 type;
0111     u8 chid;
0112 };
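/* Note: the fields above total 16 bytes (8 + 2 + 1 + 1 + 2 + 1 + 1);
 * gsi_validate_build() below checks that this matches
 * GSI_RING_ELEMENT_SIZE, which channel and event rings share.
 */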
0113 
0114 /** struct gsi_channel_scratch_gpi - GPI protocol scratch register
0115  * @max_outstanding_tre:
0116  *  Defines the maximum amount of TRE data (in bytes) allowed in a single
0117  *  transaction on a channel.  This determines the amount of prefetch
0118  *  performed by the hardware.  We configure this to equal the size of
0119  *  the TLV FIFO for the channel.
0120  * @outstanding_threshold:
0121  *  Defines the threshold (in bytes) determining when the sequencer
0122  *  should update the channel doorbell.  We configure this to equal
0123  *  the size of two TREs.
0124  */
0125 struct gsi_channel_scratch_gpi {
0126     u64 reserved1;
0127     u16 reserved2;
0128     u16 max_outstanding_tre;
0129     u16 reserved3;
0130     u16 outstanding_threshold;
0131 };
0132 
0133 /** union gsi_channel_scratch - channel scratch configuration area
0134  *
0135  * The exact interpretation of this register is protocol-specific.
0136  * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
0137  */
0138 union gsi_channel_scratch {
0139     struct gsi_channel_scratch_gpi gpi;
0140     struct {
0141         u32 word1;
0142         u32 word2;
0143         u32 word3;
0144         u32 word4;
0145     } data;
0146 };
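/* The four generic data words above alias the 16-byte protocol-specific
 * layout, allowing gsi_channel_program() to fill in the GPI view and then
 * write the result to the hardware scratch registers one 32-bit word at
 * a time.
 */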
0147 
0148 /* Check things that can be validated at build time. */
0149 static void gsi_validate_build(void)
0150 {
0151     /* This is used as a divisor */
0152     BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
0153 
0154     /* Code assumes the sizes of channel and event ring elements are
0155      * the same (and fixed).  Make sure the size of an event ring
0156      * element is what's expected.
0157      */
0158     BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
0159 
0160     /* Hardware requires a 2^n ring size.  We ensure the number of
0161      * elements in an event ring is a power of 2 elsewhere; this
0162      * ensures the elements themselves meet the requirement.
0163      */
0164     BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
0165 
0166     /* The channel element size must fit in this field */
0167     BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
0168 
0169     /* The event ring element size must fit in this field */
0170     BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
0171 }
0172 
0173 /* Return the channel id associated with a given channel */
0174 static u32 gsi_channel_id(struct gsi_channel *channel)
0175 {
0176     return channel - &channel->gsi->channel[0];
0177 }
0178 
0179 /* An initialized channel has a non-null GSI pointer */
0180 static bool gsi_channel_initialized(struct gsi_channel *channel)
0181 {
0182     return !!channel->gsi;
0183 }
0184 
0185 /* Update the GSI IRQ type register with the cached value */
0186 static void gsi_irq_type_update(struct gsi *gsi, u32 val)
0187 {
0188     gsi->type_enabled_bitmap = val;
0189     iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
0190 }
0191 
0192 static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
0193 {
0194     gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
0195 }
0196 
0197 static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
0198 {
0199     gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
0200 }
0201 
0202 /* Event ring commands are performed one at a time.  Their completion
0203  * is signaled by the event ring control GSI interrupt type, which is
0204  * only enabled when we issue an event ring command.  Only the event
0205  * ring being operated on has this interrupt enabled.
0206  */
0207 static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
0208 {
0209     u32 val = BIT(evt_ring_id);
0210 
0211     /* There's a small chance that a previous command completed
0212      * after the interrupt was disabled, so make sure we have no
0213      * pending interrupts before we enable them.
0214      */
0215     iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
0216 
0217     iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
0218     gsi_irq_type_enable(gsi, GSI_EV_CTRL);
0219 }
0220 
0221 /* Disable event ring control interrupts */
0222 static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
0223 {
0224     gsi_irq_type_disable(gsi, GSI_EV_CTRL);
0225     iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
0226 }
0227 
0228 /* Channel commands are performed one at a time.  Their completion is
0229  * signaled by the channel control GSI interrupt type, which is only
0230  * enabled when we issue a channel command.  Only the channel being
0231  * operated on has this interrupt enabled.
0232  */
0233 static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
0234 {
0235     u32 val = BIT(channel_id);
0236 
0237     /* There's a small chance that a previous command completed
0238      * after the interrupt was disabled, so make sure we have no
0239      * pending interrupts before we enable them.
0240      */
0241     iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
0242 
0243     iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
0244     gsi_irq_type_enable(gsi, GSI_CH_CTRL);
0245 }
0246 
0247 /* Disable channel control interrupts */
0248 static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
0249 {
0250     gsi_irq_type_disable(gsi, GSI_CH_CTRL);
0251     iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
0252 }
0253 
0254 static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
0255 {
0256     bool enable_ieob = !gsi->ieob_enabled_bitmap;
0257     u32 val;
0258 
0259     gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
0260     val = gsi->ieob_enabled_bitmap;
0261     iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
0262 
0263     /* Enable the interrupt type if this is the first channel enabled */
0264     if (enable_ieob)
0265         gsi_irq_type_enable(gsi, GSI_IEOB);
0266 }
0267 
0268 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
0269 {
0270     u32 val;
0271 
0272     gsi->ieob_enabled_bitmap &= ~event_mask;
0273 
0274     /* Disable the interrupt type if this was the last enabled channel */
0275     if (!gsi->ieob_enabled_bitmap)
0276         gsi_irq_type_disable(gsi, GSI_IEOB);
0277 
0278     val = gsi->ieob_enabled_bitmap;
0279     iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
0280 }
0281 
0282 static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
0283 {
0284     gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
0285 }
0286 
0287 /* Enable all GSI interrupt types */
0288 static void gsi_irq_enable(struct gsi *gsi)
0289 {
0290     u32 val;
0291 
0292     /* Global interrupts include hardware error reports.  Enable
0293      * that so we can at least report the error should it occur.
0294      */
0295     iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
0296     gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
0297 
0298     /* General GSI interrupts are reported to all EEs; if they occur
0299      * they are unrecoverable (without reset).  A breakpoint interrupt
0300      * also exists, but we don't support that.  We want to be notified
0301      * of errors so we can report them, even if they can't be handled.
0302      */
0303     val = BIT(BUS_ERROR);
0304     val |= BIT(CMD_FIFO_OVRFLOW);
0305     val |= BIT(MCS_STACK_OVRFLOW);
0306     iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
0307     gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
0308 }
0309 
0310 /* Disable all GSI interrupt types */
0311 static void gsi_irq_disable(struct gsi *gsi)
0312 {
0313     gsi_irq_type_update(gsi, 0);
0314 
0315     /* Clear the type-specific interrupt masks set by gsi_irq_enable() */
0316     iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
0317     iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
0318 }
0319 
0320 /* Return the virtual address associated with a ring index */
0321 void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
0322 {
0323     /* Note: index *must* be used modulo the ring count here */
0324     return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
0325 }
0326 
0327 /* Return the 32-bit DMA address associated with a ring index */
0328 static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
0329 {
0330     return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
0331 }
0332 
0333 /* Return the ring index of a 32-bit ring offset */
0334 static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
0335 {
0336     return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
0337 }
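/* Worked example (hypothetical values): with lower_32_bits(ring->addr)
 * equal to 0x40001000 and 16-byte ring elements, offset 0x40001040 maps
 * to ring index (0x40001040 - 0x40001000) / 16 = 4.
 */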
0338 
0339 /* Issue a GSI command by writing a value to a register, then wait for
0340  * completion to be signaled.  Returns true if the command completes
0341  * or false if it times out.
0342  */
0343 static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
0344 {
0345     unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
0346     struct completion *completion = &gsi->completion;
0347 
0348     reinit_completion(completion);
0349 
0350     iowrite32(val, gsi->virt + reg);
0351 
0352     return !!wait_for_completion_timeout(completion, timeout);
0353 }
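/* Note: gsi->completion is shared by all event ring and channel commands,
 * so callers serialize command issuance under gsi->mutex (see, for
 * example, __gsi_channel_start() and gsi_channel_reset() below).
 */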
0354 
0355 /* Return the hardware's notion of the current state of an event ring */
0356 static enum gsi_evt_ring_state
0357 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
0358 {
0359     u32 val;
0360 
0361     val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
0362 
0363     return u32_get_bits(val, EV_CHSTATE_FMASK);
0364 }
0365 
0366 /* Issue an event ring command and wait for it to complete */
0367 static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
0368                  enum gsi_evt_cmd_opcode opcode)
0369 {
0370     struct device *dev = gsi->dev;
0371     bool timeout;
0372     u32 val;
0373 
0374     /* Enable the completion interrupt for the command */
0375     gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
0376 
0377     val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
0378     val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
0379 
0380     timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
0381 
0382     gsi_irq_ev_ctrl_disable(gsi);
0383 
0384     if (!timeout)
0385         return;
0386 
0387     dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
0388         opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
0389 }
0390 
0391 /* Allocate an event ring in NOT_ALLOCATED state */
0392 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
0393 {
0394     enum gsi_evt_ring_state state;
0395 
0396     /* Get initial event ring state */
0397     state = gsi_evt_ring_state(gsi, evt_ring_id);
0398     if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
0399         dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
0400             evt_ring_id, state);
0401         return -EINVAL;
0402     }
0403 
0404     gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
0405 
0406     /* If successful the event ring state will have changed */
0407     state = gsi_evt_ring_state(gsi, evt_ring_id);
0408     if (state == GSI_EVT_RING_STATE_ALLOCATED)
0409         return 0;
0410 
0411     dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
0412         evt_ring_id, state);
0413 
0414     return -EIO;
0415 }
0416 
0417 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
0418 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
0419 {
0420     enum gsi_evt_ring_state state;
0421 
0422     state = gsi_evt_ring_state(gsi, evt_ring_id);
0423     if (state != GSI_EVT_RING_STATE_ALLOCATED &&
0424         state != GSI_EVT_RING_STATE_ERROR) {
0425         dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
0426             evt_ring_id, state);
0427         return;
0428     }
0429 
0430     gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
0431 
0432     /* If successful the event ring state will have changed */
0433     state = gsi_evt_ring_state(gsi, evt_ring_id);
0434     if (state == GSI_EVT_RING_STATE_ALLOCATED)
0435         return;
0436 
0437     dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
0438         evt_ring_id, state);
0439 }
0440 
0441 /* Issue a hardware de-allocation request for an allocated event ring */
0442 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
0443 {
0444     enum gsi_evt_ring_state state;
0445 
0446     state = gsi_evt_ring_state(gsi, evt_ring_id);
0447     if (state != GSI_EVT_RING_STATE_ALLOCATED) {
0448         dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
0449             evt_ring_id, state);
0450         return;
0451     }
0452 
0453     gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
0454 
0455     /* If successful the event ring state will have changed */
0456     state = gsi_evt_ring_state(gsi, evt_ring_id);
0457     if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
0458         return;
0459 
0460     dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
0461         evt_ring_id, state);
0462 }
0463 
0464 /* Fetch the current state of a channel from hardware */
0465 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
0466 {
0467     u32 channel_id = gsi_channel_id(channel);
0468     void __iomem *virt = channel->gsi->virt;
0469     u32 val;
0470 
0471     val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
0472 
0473     return u32_get_bits(val, CHSTATE_FMASK);
0474 }
0475 
0476 /* Issue a channel command and wait for it to complete */
0477 static void
0478 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
0479 {
0480     u32 channel_id = gsi_channel_id(channel);
0481     struct gsi *gsi = channel->gsi;
0482     struct device *dev = gsi->dev;
0483     bool timeout;
0484     u32 val;
0485 
0486     /* Enable the completion interrupt for the command */
0487     gsi_irq_ch_ctrl_enable(gsi, channel_id);
0488 
0489     val = u32_encode_bits(channel_id, CH_CHID_FMASK);
0490     val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
0491     timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
0492 
0493     gsi_irq_ch_ctrl_disable(gsi);
0494 
0495     if (!timeout)
0496         return;
0497 
0498     dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
0499         opcode, channel_id, gsi_channel_state(channel));
0500 }
0501 
0502 /* Allocate GSI channel in NOT_ALLOCATED state */
0503 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
0504 {
0505     struct gsi_channel *channel = &gsi->channel[channel_id];
0506     struct device *dev = gsi->dev;
0507     enum gsi_channel_state state;
0508 
0509     /* Get initial channel state */
0510     state = gsi_channel_state(channel);
0511     if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
0512         dev_err(dev, "channel %u bad state %u before alloc\n",
0513             channel_id, state);
0514         return -EINVAL;
0515     }
0516 
0517     gsi_channel_command(channel, GSI_CH_ALLOCATE);
0518 
0519     /* If successful the channel state will have changed */
0520     state = gsi_channel_state(channel);
0521     if (state == GSI_CHANNEL_STATE_ALLOCATED)
0522         return 0;
0523 
0524     dev_err(dev, "channel %u bad state %u after alloc\n",
0525         channel_id, state);
0526 
0527     return -EIO;
0528 }
0529 
0530 /* Start an ALLOCATED channel */
0531 static int gsi_channel_start_command(struct gsi_channel *channel)
0532 {
0533     struct device *dev = channel->gsi->dev;
0534     enum gsi_channel_state state;
0535 
0536     state = gsi_channel_state(channel);
0537     if (state != GSI_CHANNEL_STATE_ALLOCATED &&
0538         state != GSI_CHANNEL_STATE_STOPPED) {
0539         dev_err(dev, "channel %u bad state %u before start\n",
0540             gsi_channel_id(channel), state);
0541         return -EINVAL;
0542     }
0543 
0544     gsi_channel_command(channel, GSI_CH_START);
0545 
0546     /* If successful the channel state will have changed */
0547     state = gsi_channel_state(channel);
0548     if (state == GSI_CHANNEL_STATE_STARTED)
0549         return 0;
0550 
0551     dev_err(dev, "channel %u bad state %u after start\n",
0552         gsi_channel_id(channel), state);
0553 
0554     return -EIO;
0555 }
0556 
0557 /* Stop a GSI channel in STARTED state */
0558 static int gsi_channel_stop_command(struct gsi_channel *channel)
0559 {
0560     struct device *dev = channel->gsi->dev;
0561     enum gsi_channel_state state;
0562 
0563     state = gsi_channel_state(channel);
0564 
0565     /* Channel could have entered STOPPED state since last call
0566      * if it timed out.  If so, we're done.
0567      */
0568     if (state == GSI_CHANNEL_STATE_STOPPED)
0569         return 0;
0570 
0571     if (state != GSI_CHANNEL_STATE_STARTED &&
0572         state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
0573         dev_err(dev, "channel %u bad state %u before stop\n",
0574             gsi_channel_id(channel), state);
0575         return -EINVAL;
0576     }
0577 
0578     gsi_channel_command(channel, GSI_CH_STOP);
0579 
0580     /* If successful the channel state will have changed */
0581     state = gsi_channel_state(channel);
0582     if (state == GSI_CHANNEL_STATE_STOPPED)
0583         return 0;
0584 
0585     /* We may have to try again if stop is in progress */
0586     if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
0587         return -EAGAIN;
0588 
0589     dev_err(dev, "channel %u bad state %u after stop\n",
0590         gsi_channel_id(channel), state);
0591 
0592     return -EIO;
0593 }
0594 
0595 /* Reset a GSI channel in ALLOCATED or ERROR state. */
0596 static void gsi_channel_reset_command(struct gsi_channel *channel)
0597 {
0598     struct device *dev = channel->gsi->dev;
0599     enum gsi_channel_state state;
0600 
0601     /* A short delay is required before a RESET command */
0602     usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
0603 
0604     state = gsi_channel_state(channel);
0605     if (state != GSI_CHANNEL_STATE_STOPPED &&
0606         state != GSI_CHANNEL_STATE_ERROR) {
0607         /* No need to reset a channel already in ALLOCATED state */
0608         if (state != GSI_CHANNEL_STATE_ALLOCATED)
0609             dev_err(dev, "channel %u bad state %u before reset\n",
0610                 gsi_channel_id(channel), state);
0611         return;
0612     }
0613 
0614     gsi_channel_command(channel, GSI_CH_RESET);
0615 
0616     /* If successful the channel state will have changed */
0617     state = gsi_channel_state(channel);
0618     if (state != GSI_CHANNEL_STATE_ALLOCATED)
0619         dev_err(dev, "channel %u bad state %u after reset\n",
0620             gsi_channel_id(channel), state);
0621 }
0622 
0623 /* Deallocate an ALLOCATED GSI channel */
0624 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
0625 {
0626     struct gsi_channel *channel = &gsi->channel[channel_id];
0627     struct device *dev = gsi->dev;
0628     enum gsi_channel_state state;
0629 
0630     state = gsi_channel_state(channel);
0631     if (state != GSI_CHANNEL_STATE_ALLOCATED) {
0632         dev_err(dev, "channel %u bad state %u before dealloc\n",
0633             channel_id, state);
0634         return;
0635     }
0636 
0637     gsi_channel_command(channel, GSI_CH_DE_ALLOC);
0638 
0639     /* If successful the channel state will have changed */
0640     state = gsi_channel_state(channel);
0641 
0642     if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
0643         dev_err(dev, "channel %u bad state %u after dealloc\n",
0644             channel_id, state);
0645 }
0646 
0647 /* Ring an event ring doorbell, reporting the last entry processed by the AP.
0648  * The index argument (modulo the ring count) is the first unfilled entry, so
0649  * we supply one less than that with the doorbell.  Update the event ring
0650  * index field with the value provided.
0651  */
0652 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
0653 {
0654     struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
0655     u32 val;
0656 
0657     ring->index = index;    /* Next unused entry */
0658 
0659     /* Note: index *must* be used modulo the ring count here */
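    /* (If index is 0 the u32 subtraction wraps around; because the ring
     * count is a power of 2, 2^32 is a multiple of it, so the modulo
     * still selects the last ring entry, as intended.)
     */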
0660     val = gsi_ring_addr(ring, (index - 1) % ring->count);
0661     iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
0662 }
0663 
0664 /* Program an event ring for use */
0665 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
0666 {
0667     struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
0668     struct gsi_ring *ring = &evt_ring->ring;
0669     size_t size;
0670     u32 val;
0671 
0672     /* We program all event rings as GPI type/protocol */
0673     val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
0674     val |= EV_INTYPE_FMASK;
0675     val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
0676     iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
0677 
0678     size = ring->count * GSI_RING_ELEMENT_SIZE;
0679     val = ev_r_length_encoded(gsi->version, size);
0680     iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
0681 
0682     /* The context 2 and 3 registers store the low-order and
0683      * high-order 32 bits of the address of the event ring,
0684      * respectively.
0685      */
0686     val = lower_32_bits(ring->addr);
0687     iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
0688     val = upper_32_bits(ring->addr);
0689     iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
0690 
0691     /* Enable interrupt moderation by setting the moderation delay */
0692     val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
0693     val |= u32_encode_bits(1, MODC_FMASK);  /* comes from channel */
0694     iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
0695 
0696     /* No MSI write data, and the MSI high and low addresses are 0 */
0697     iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
0698     iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
0699     iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
0700 
0701     /* We don't need to get event read pointer updates */
0702     iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
0703     iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
0704 
0705     /* Finally, tell the hardware our "last processed" event (arbitrary) */
0706     gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
0707 }
0708 
0709 /* Find the transaction whose completion indicates a channel is quiesced */
0710 static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
0711 {
0712     struct gsi_trans_info *trans_info = &channel->trans_info;
0713     const struct list_head *list;
0714     struct gsi_trans *trans;
0715 
0716     spin_lock_bh(&trans_info->spinlock);
0717 
0718     /* There is a small chance a TX transaction got allocated just
0719      * before we disabled transmits, so check for that.
0720      */
0721     if (channel->toward_ipa) {
0722         list = &trans_info->alloc;
0723         if (!list_empty(list))
0724             goto done;
0725         list = &trans_info->committed;
0726         if (!list_empty(list))
0727             goto done;
0728         list = &trans_info->pending;
0729         if (!list_empty(list))
0730             goto done;
0731     }
0732 
0733     /* Otherwise (TX or RX) we want to wait for anything that
0734      * has completed, or has been polled but not released yet.
0735      */
0736     list = &trans_info->complete;
0737     if (!list_empty(list))
0738         goto done;
0739     list = &trans_info->polled;
0740     if (list_empty(list))
0741         list = NULL;
0742 done:
0743     trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
0744 
0745     /* Caller will wait for this, so take a reference */
0746     if (trans)
0747         refcount_inc(&trans->refcount);
0748 
0749     spin_unlock_bh(&trans_info->spinlock);
0750 
0751     return trans;
0752 }
0753 
0754 /* Wait for transaction activity on a channel to complete */
0755 static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
0756 {
0757     struct gsi_trans *trans;
0758 
0759     /* Get the last transaction, and wait for it to complete */
0760     trans = gsi_channel_trans_last(channel);
0761     if (trans) {
0762         wait_for_completion(&trans->completion);
0763         gsi_trans_free(trans);
0764     }
0765 }
0766 
0767 /* Program a channel for use; there is no gsi_channel_deprogram() */
0768 static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
0769 {
0770     size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
0771     u32 channel_id = gsi_channel_id(channel);
0772     union gsi_channel_scratch scr = { };
0773     struct gsi_channel_scratch_gpi *gpi;
0774     struct gsi *gsi = channel->gsi;
0775     u32 wrr_weight = 0;
0776     u32 val;
0777 
0778     /* We program all channels as GPI type/protocol */
0779     val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
0780     if (channel->toward_ipa)
0781         val |= CHTYPE_DIR_FMASK;
0782     val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
0783     val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
0784     iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
0785 
0786     val = r_length_encoded(gsi->version, size);
0787     iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
0788 
0789     /* The context 2 and 3 registers store the low-order and
0790      * high-order 32 bits of the address of the channel ring,
0791      * respectively.
0792      */
0793     val = lower_32_bits(channel->tre_ring.addr);
0794     iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
0795     val = upper_32_bits(channel->tre_ring.addr);
0796     iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
0797 
0798     /* Command channel gets low weighted round-robin priority */
0799     if (channel->command)
0800         wrr_weight = field_max(WRR_WEIGHT_FMASK);
0801     val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
0802 
0803     /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
0804 
0805     /* No need to use the doorbell engine starting at IPA v4.0 */
0806     if (gsi->version < IPA_VERSION_4_0 && doorbell)
0807         val |= USE_DB_ENG_FMASK;
0808 
0809     /* v4.0 introduces an escape buffer for prefetch.  We use it
0810      * on all but the AP command channel.
0811      */
0812     if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
0813         /* If not otherwise set, prefetch buffers are used */
0814         if (gsi->version < IPA_VERSION_4_5)
0815             val |= USE_ESCAPE_BUF_ONLY_FMASK;
0816         else
0817             val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
0818                            PREFETCH_MODE_FMASK);
0819     }
0820     /* All channels set DB_IN_BYTES */
0821     if (gsi->version >= IPA_VERSION_4_9)
0822         val |= DB_IN_BYTES;
0823 
0824     iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
0825 
0826     /* Now update the scratch registers for GPI protocol */
0827     gpi = &scr.gpi;
0828     gpi->max_outstanding_tre = channel->trans_tre_max *
0829                     GSI_RING_ELEMENT_SIZE;
0830     gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
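    /* Worked example (hypothetical sizing): with a trans_tre_max of 8 and
     * 16-byte ring elements, up to 128 bytes of TREs may be outstanding,
     * and the sequencer's doorbell-update threshold is 32 bytes (two TREs).
     */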
0831 
0832     val = scr.data.word1;
0833     iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
0834 
0835     val = scr.data.word2;
0836     iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
0837 
0838     val = scr.data.word3;
0839     iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
0840 
0841     /* We must preserve the low-order 16 bits of the last scratch register
0842      * (only the high-order 16 bits are ours).  The next sequence assumes
0843      * the preserved bits remain unchanged between the read and the write.
0844      */
0845     val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
0846     val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
0847     iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
0848 
0849     /* All done! */
0850 }
0851 
0852 static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
0853 {
0854     struct gsi *gsi = channel->gsi;
0855     int ret;
0856 
0857     /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
0858     if (resume && gsi->version < IPA_VERSION_4_0)
0859         return 0;
0860 
0861     mutex_lock(&gsi->mutex);
0862 
0863     ret = gsi_channel_start_command(channel);
0864 
0865     mutex_unlock(&gsi->mutex);
0866 
0867     return ret;
0868 }
0869 
0870 /* Start an allocated GSI channel */
0871 int gsi_channel_start(struct gsi *gsi, u32 channel_id)
0872 {
0873     struct gsi_channel *channel = &gsi->channel[channel_id];
0874     int ret;
0875 
0876     /* Enable NAPI and the completion interrupt */
0877     napi_enable(&channel->napi);
0878     gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
0879 
0880     ret = __gsi_channel_start(channel, false);
0881     if (ret) {
0882         gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
0883         napi_disable(&channel->napi);
0884     }
0885 
0886     return ret;
0887 }
0888 
0889 static int gsi_channel_stop_retry(struct gsi_channel *channel)
0890 {
0891     u32 retries = GSI_CHANNEL_STOP_RETRIES;
0892     int ret;
0893 
0894     do {
0895         ret = gsi_channel_stop_command(channel);
0896         if (ret != -EAGAIN)
0897             break;
0898         usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
0899     } while (retries--);
0900 
0901     return ret;
0902 }
0903 
0904 static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
0905 {
0906     struct gsi *gsi = channel->gsi;
0907     int ret;
0908 
0909     /* Wait for any underway transactions to complete before stopping. */
0910     gsi_channel_trans_quiesce(channel);
0911 
0912     /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
0913     if (suspend && gsi->version < IPA_VERSION_4_0)
0914         return 0;
0915 
0916     mutex_lock(&gsi->mutex);
0917 
0918     ret = gsi_channel_stop_retry(channel);
0919 
0920     mutex_unlock(&gsi->mutex);
0921 
0922     return ret;
0923 }
0924 
0925 /* Stop a started channel */
0926 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
0927 {
0928     struct gsi_channel *channel = &gsi->channel[channel_id];
0929     int ret;
0930 
0931     ret = __gsi_channel_stop(channel, false);
0932     if (ret)
0933         return ret;
0934 
0935     /* Disable the completion interrupt and NAPI if successful */
0936     gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
0937     napi_disable(&channel->napi);
0938 
0939     return 0;
0940 }
0941 
0942 /* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
0943 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
0944 {
0945     struct gsi_channel *channel = &gsi->channel[channel_id];
0946 
0947     mutex_lock(&gsi->mutex);
0948 
0949     gsi_channel_reset_command(channel);
0950     /* Due to a hardware quirk we may need to reset RX channels twice. */
0951     if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
0952         gsi_channel_reset_command(channel);
0953 
0954     /* Hardware assumes this is 0 following reset */
0955     channel->tre_ring.index = 0;
0956     gsi_channel_program(channel, doorbell);
0957     gsi_channel_trans_cancel_pending(channel);
0958 
0959     mutex_unlock(&gsi->mutex);
0960 }
0961 
0962 /* Stop a started channel for suspend */
0963 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
0964 {
0965     struct gsi_channel *channel = &gsi->channel[channel_id];
0966     int ret;
0967 
0968     ret = __gsi_channel_stop(channel, true);
0969     if (ret)
0970         return ret;
0971 
0972     /* Ensure NAPI polling has finished. */
0973     napi_synchronize(&channel->napi);
0974 
0975     return 0;
0976 }
0977 
0978 /* Resume a suspended channel (starting if stopped) */
0979 int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
0980 {
0981     struct gsi_channel *channel = &gsi->channel[channel_id];
0982 
0983     return __gsi_channel_start(channel, true);
0984 }
0985 
0986 /* Prevent all GSI interrupts while suspended */
0987 void gsi_suspend(struct gsi *gsi)
0988 {
0989     disable_irq(gsi->irq);
0990 }
0991 
0992 /* Allow all GSI interrupts again when resuming */
0993 void gsi_resume(struct gsi *gsi)
0994 {
0995     enable_irq(gsi->irq);
0996 }
0997 
0998 void gsi_trans_tx_committed(struct gsi_trans *trans)
0999 {
1000     struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
1001 
1002     channel->trans_count++;
1003     channel->byte_count += trans->len;
1004 
1005     trans->trans_count = channel->trans_count;
1006     trans->byte_count = channel->byte_count;
1007 }
1008 
1009 void gsi_trans_tx_queued(struct gsi_trans *trans)
1010 {
1011     u32 channel_id = trans->channel_id;
1012     struct gsi *gsi = trans->gsi;
1013     struct gsi_channel *channel;
1014     u32 trans_count;
1015     u32 byte_count;
1016 
1017     channel = &gsi->channel[channel_id];
1018 
1019     byte_count = channel->byte_count - channel->queued_byte_count;
1020     trans_count = channel->trans_count - channel->queued_trans_count;
1021     channel->queued_byte_count = channel->byte_count;
1022     channel->queued_trans_count = channel->trans_count;
1023 
1024     ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
1025 }
1026 
1027 /**
1028  * gsi_trans_tx_completed() - Report completed TX transactions
1029  * @trans:  TX channel transaction that has completed
1030  *
1031  * Report that a transaction on a TX channel has completed.  At the time a
1032  * transaction is committed, we record *in the transaction* its channel's
1033  * committed transaction and byte counts.  Transactions are completed in
1034  * order, and the difference between the channel's byte/transaction count
1035  * when the transaction was committed and when it completes tells us
1036  * exactly how much data has been transferred while the transaction was
1037  * pending.
1038  *
1039  * We report this information to the network stack, which uses it to manage
1040  * the rate at which data is sent to hardware.
1041  */
1042 static void gsi_trans_tx_completed(struct gsi_trans *trans)
1043 {
1044     u32 channel_id = trans->channel_id;
1045     struct gsi *gsi = trans->gsi;
1046     struct gsi_channel *channel;
1047     u32 trans_count;
1048     u32 byte_count;
1049 
1050     channel = &gsi->channel[channel_id];
1051     trans_count = trans->trans_count - channel->compl_trans_count;
1052     byte_count = trans->byte_count - channel->compl_byte_count;
1053 
1054     channel->compl_trans_count += trans_count;
1055     channel->compl_byte_count += byte_count;
1056 
1057     ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
1058 }
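/* Worked example (hypothetical counts): if a transaction was committed
 * when the channel totals stood at 10 transactions and 4000 bytes, and
 * the completed totals are currently 7 transactions and 2500 bytes, its
 * completion reports 3 transactions and 1500 bytes to the network stack.
 */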
1059 
1060 /* Channel control interrupt handler */
1061 static void gsi_isr_chan_ctrl(struct gsi *gsi)
1062 {
1063     u32 channel_mask;
1064 
1065     channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
1066     iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
1067 
1068     while (channel_mask) {
1069         u32 channel_id = __ffs(channel_mask);
1070 
1071         channel_mask ^= BIT(channel_id);
1072 
1073         complete(&gsi->completion);
1074     }
1075 }
1076 
1077 /* Event ring control interrupt handler */
1078 static void gsi_isr_evt_ctrl(struct gsi *gsi)
1079 {
1080     u32 event_mask;
1081 
1082     event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
1083     iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
1084 
1085     while (event_mask) {
1086         u32 evt_ring_id = __ffs(event_mask);
1087 
1088         event_mask ^= BIT(evt_ring_id);
1089 
1090         complete(&gsi->completion);
1091     }
1092 }
1093 
1094 /* Global channel error interrupt handler */
1095 static void
1096 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
1097 {
1098     if (code == GSI_OUT_OF_RESOURCES) {
1099         dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
1100         complete(&gsi->completion);
1101         return;
1102     }
1103 
1104     /* Report, but otherwise ignore all other error codes */
1105     dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
1106         channel_id, err_ee, code);
1107 }
1108 
1109 /* Global event error interrupt handler */
1110 static void
1111 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
1112 {
1113     if (code == GSI_OUT_OF_RESOURCES) {
1114         struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1115         u32 channel_id = gsi_channel_id(evt_ring->channel);
1116 
1117         complete(&gsi->completion);
1118         dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
1119             channel_id);
1120         return;
1121     }
1122 
1123     /* Report, but otherwise ignore all other error codes */
1124     dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
1125         evt_ring_id, err_ee, code);
1126 }
1127 
1128 /* Global error interrupt handler */
1129 static void gsi_isr_glob_err(struct gsi *gsi)
1130 {
1131     enum gsi_err_type type;
1132     enum gsi_err_code code;
1133     u32 which;
1134     u32 val;
1135     u32 ee;
1136 
1137     /* Get the logged error, then reinitialize the log */
1138     val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
1139     iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1140     iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
1141 
1142     ee = u32_get_bits(val, ERR_EE_FMASK);
1143     type = u32_get_bits(val, ERR_TYPE_FMASK);
1144     which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
1145     code = u32_get_bits(val, ERR_CODE_FMASK);
1146 
1147     if (type == GSI_ERR_TYPE_CHAN)
1148         gsi_isr_glob_chan_err(gsi, ee, which, code);
1149     else if (type == GSI_ERR_TYPE_EVT)
1150         gsi_isr_glob_evt_err(gsi, ee, which, code);
1151     else    /* type GSI_ERR_TYPE_GLOB should be fatal */
1152         dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
1153 }
1154 
1155 /* Generic EE interrupt handler */
1156 static void gsi_isr_gp_int1(struct gsi *gsi)
1157 {
1158     u32 result;
1159     u32 val;
1160 
1161     /* This interrupt is used to handle completions of GENERIC GSI
1162      * commands.  We use these to allocate and halt channels on the
1163      * modem's behalf due to a hardware quirk on IPA v4.2.  The modem
1164      * "owns" channels even when the AP allocates them, and we have no
1165      * way of knowing whether a modem channel's state has been changed.
1166      *
1167      * We also use GENERIC commands to enable/disable channel flow
1168      * control for IPA v4.2+.
1169      *
1170      * It is recommended that we halt the modem channels we allocated
1171      * when shutting down, but it's possible the channel isn't running
1172      * at the time we issue the HALT command.  We'll get an error in
1173      * that case, but it's harmless (the channel is already halted).
1174      * Similarly, we could get an error back when updating flow control
1175      * on a channel because it's not in the proper state.
1176      *
1177      * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
1178      * error if we receive it.
1179      */
1180     val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1181     result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
1182 
1183     switch (result) {
1184     case GENERIC_EE_SUCCESS:
1185     case GENERIC_EE_INCORRECT_CHANNEL_STATE:
1186         gsi->result = 0;
1187         break;
1188 
1189     case GENERIC_EE_RETRY:
1190         gsi->result = -EAGAIN;
1191         break;
1192 
1193     default:
1194         dev_err(gsi->dev, "global INT1 generic result %u\n", result);
1195         gsi->result = -EIO;
1196         break;
1197     }
1198 
1199     complete(&gsi->completion);
1200 }
1201 
1202 /* Global EE interrupt handler */
1203 static void gsi_isr_glob_ee(struct gsi *gsi)
1204 {
1205     u32 val;
1206 
1207     val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
1208 
1209     if (val & BIT(ERROR_INT))
1210         gsi_isr_glob_err(gsi);
1211 
1212     iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
1213 
1214     val &= ~BIT(ERROR_INT);
1215 
1216     if (val & BIT(GP_INT1)) {
1217         val ^= BIT(GP_INT1);
1218         gsi_isr_gp_int1(gsi);
1219     }
1220 
1221     if (val)
1222         dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
1223 }
1224 
1225 /* I/O completion (IEOB) interrupt handler */
1226 static void gsi_isr_ieob(struct gsi *gsi)
1227 {
1228     u32 event_mask;
1229 
1230     event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
1231     gsi_irq_ieob_disable(gsi, event_mask);
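    /* (These are re-enabled one at a time by gsi_irq_ieob_enable_one() in
     * gsi_channel_poll(), once a channel's NAPI poll completes without
     * exhausting its budget.)
     */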
1232     iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
1233 
1234     while (event_mask) {
1235         u32 evt_ring_id = __ffs(event_mask);
1236 
1237         event_mask ^= BIT(evt_ring_id);
1238 
1239         napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
1240     }
1241 }
1242 
1243 /* General event interrupts represent serious problems, so report them */
1244 static void gsi_isr_general(struct gsi *gsi)
1245 {
1246     struct device *dev = gsi->dev;
1247     u32 val;
1248 
1249     val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
1250     iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
1251 
1252     dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
1253 }
1254 
1255 /**
1256  * gsi_isr() - Top level GSI interrupt service routine
1257  * @irq:    Interrupt number (ignored)
1258  * @dev_id: GSI pointer supplied to request_irq()
1259  *
1260  * This is the main handler function registered for the GSI IRQ. Each type
1261  * of interrupt has a separate handler function that is called from here.
1262  */
1263 static irqreturn_t gsi_isr(int irq, void *dev_id)
1264 {
1265     struct gsi *gsi = dev_id;
1266     u32 intr_mask;
1267     u32 cnt = 0;
1268 
1269     /* enum gsi_irq_type_id defines GSI interrupt types */
1270     while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
1271         /* intr_mask contains bitmask of pending GSI interrupts */
1272         do {
1273             u32 gsi_intr = BIT(__ffs(intr_mask));
1274 
1275             intr_mask ^= gsi_intr;
1276 
1277             switch (gsi_intr) {
1278             case BIT(GSI_CH_CTRL):
1279                 gsi_isr_chan_ctrl(gsi);
1280                 break;
1281             case BIT(GSI_EV_CTRL):
1282                 gsi_isr_evt_ctrl(gsi);
1283                 break;
1284             case BIT(GSI_GLOB_EE):
1285                 gsi_isr_glob_ee(gsi);
1286                 break;
1287             case BIT(GSI_IEOB):
1288                 gsi_isr_ieob(gsi);
1289                 break;
1290             case BIT(GSI_GENERAL):
1291                 gsi_isr_general(gsi);
1292                 break;
1293             default:
1294                 dev_err(gsi->dev,
1295                     "unrecognized interrupt type 0x%08x\n",
1296                     gsi_intr);
1297                 break;
1298             }
1299         } while (intr_mask);
1300 
1301         if (++cnt > GSI_ISR_MAX_ITER) {
1302             dev_err(gsi->dev, "interrupt flood\n");
1303             break;
1304         }
1305     }
1306 
1307     return IRQ_HANDLED;
1308 }
1309 
1310 /* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
1311 static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
1312 {
1313     int ret;
1314 
1315     ret = platform_get_irq_byname(pdev, "gsi");
1316     if (ret <= 0)
1317         return ret ? : -EINVAL;
1318 
1319     gsi->irq = ret;
1320 
1321     return 0;
1322 }
1323 
1324 /* Return the transaction associated with a transfer completion event */
1325 static struct gsi_trans *
1326 gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
1327 {
1328     u32 channel_id = event->chid;
1329     struct gsi_channel *channel;
1330     struct gsi_trans *trans;
1331     u32 tre_offset;
1332     u32 tre_index;
1333 
1334     channel = &gsi->channel[channel_id];
1335     if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
1336         return NULL;
1337 
1338     /* Event xfer_ptr records the TRE it's associated with */
1339     tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
1340     tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
1341 
1342     trans = gsi_channel_trans_mapped(channel, tre_index);
1343 
1344     if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
1345         return NULL;
1346 
1347     return trans;
1348 }
1349 
1350 /**
1351  * gsi_evt_ring_update() - Update transaction state from hardware
1352  * @gsi:        GSI pointer
1353  * @evt_ring_id:    Event ring ID
1354  * @index:      Event index in ring reported by hardware
1355  *
1356  * Events for RX channels contain the actual number of bytes received into
1357  * the buffer.  Every event has a transaction associated with it, and here
1358  * we update transactions to record their actual received lengths.
1359  *
1360  * When an event for a TX channel arrives, we use information in the
1361  * transaction to report the number of requests and bytes that have
1362  * been transferred.
1363  *
1364  * This function is called whenever we learn that the GSI hardware has filled
1365  * new events since the last time we checked.  The ring's index field tells
1366  * us the first entry in need of processing.  The @index provided is the
1367  * first *unfilled* event in the ring (following the last filled one).
1368  *
1369  * Events are sequential within the event ring, and transactions are
1370  * sequential within the transaction array.
1371  *
1372  * Note that @index always refers to an element *within* the event ring.
1373  */
1374 static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
1375 {
1376     struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1377     struct gsi_ring *ring = &evt_ring->ring;
1378     struct gsi_event *event_done;
1379     struct gsi_event *event;
1380     u32 event_avail;
1381     u32 old_index;
1382 
1383     /* Starting with the oldest un-processed event, determine which
1384      * transaction (and which channel) is associated with the event.
1385      * For RX channels, update each completed transaction with the
1386      * number of bytes that were actually received.  For TX channels
1387      * associated with a network device, report to the network stack
1388      * the number of transfers and bytes this completion represents.
1389      */
1390     old_index = ring->index;
1391     event = gsi_ring_virt(ring, old_index);
1392 
1393     /* Compute the number of events to process before we wrap,
1394      * and determine when we'll be done processing events.
1395      */
1396     event_avail = ring->count - old_index % ring->count;
1397     event_done = gsi_ring_virt(ring, index);
1398     do {
1399         struct gsi_trans *trans;
1400 
1401         trans = gsi_event_trans(gsi, event);
1402         if (!trans)
1403             return;
1404 
1405         if (trans->direction == DMA_FROM_DEVICE)
1406             trans->len = __le16_to_cpu(event->len);
1407         else
1408             gsi_trans_tx_completed(trans);
1409 
1410         gsi_trans_move_complete(trans);
1411 
1412         /* Move on to the next event and transaction */
1413         if (--event_avail)
1414             event++;
1415         else
1416             event = gsi_ring_virt(ring, 0);
1417     } while (event != event_done);
1418 
1419     /* Tell the hardware we've handled these events */
1420     gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
1421 }
1422 
1423 /* Initialize a ring, including allocating DMA memory for its entries */
1424 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
1425 {
1426     u32 size = count * GSI_RING_ELEMENT_SIZE;
1427     struct device *dev = gsi->dev;
1428     dma_addr_t addr;
1429 
1430     /* Hardware requires a 2^n ring size, with alignment equal to size.
1431      * dma_alloc_coherent() returns an address aligned to a power-of-2
1432      * number of pages covering the allocation, satisfying the requirement.
1433      */
1434     ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
1435     if (!ring->virt)
1436         return -ENOMEM;
1437 
1438     ring->addr = addr;
1439     ring->count = count;
1440     ring->index = 0;
1441 
1442     return 0;
1443 }
1444 
1445 /* Free a previously-allocated ring */
1446 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
1447 {
1448     size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
1449 
1450     dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
1451 }
1452 
1453 /* Allocate an available event ring id */
1454 static int gsi_evt_ring_id_alloc(struct gsi *gsi)
1455 {
1456     u32 evt_ring_id;
1457 
1458     if (gsi->event_bitmap == ~0U) {
1459         dev_err(gsi->dev, "event rings exhausted\n");
1460         return -ENOSPC;
1461     }
1462 
1463     evt_ring_id = ffz(gsi->event_bitmap);
1464     gsi->event_bitmap |= BIT(evt_ring_id);
1465 
1466     return (int)evt_ring_id;
1467 }
1468 
1469 /* Free a previously-allocated event ring id */
1470 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
1471 {
1472     gsi->event_bitmap &= ~BIT(evt_ring_id);
1473 }
1474 
1475 /* Ring a channel doorbell, reporting the first un-filled entry */
1476 void gsi_channel_doorbell(struct gsi_channel *channel)
1477 {
1478     struct gsi_ring *tre_ring = &channel->tre_ring;
1479     u32 channel_id = gsi_channel_id(channel);
1480     struct gsi *gsi = channel->gsi;
1481     u32 val;
1482 
1483     /* Note: index *must* be used modulo the ring count here */
1484     val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
1485     iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
1486 }
1487 
1488 /* Consult hardware, move any newly completed transactions to completed list */
1489 static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
1490 {
1491     u32 evt_ring_id = channel->evt_ring_id;
1492     struct gsi *gsi = channel->gsi;
1493     struct gsi_evt_ring *evt_ring;
1494     struct gsi_trans *trans;
1495     struct gsi_ring *ring;
1496     u32 offset;
1497     u32 index;
1498 
1499     evt_ring = &gsi->evt_ring[evt_ring_id];
1500     ring = &evt_ring->ring;
1501 
1502     /* See if there's anything new to process; if not, we're done.  Note
1503      * that index always refers to an entry *within* the event ring.
1504      */
1505     offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
1506     index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
1507     if (index == ring->index % ring->count)
1508         return NULL;
1509 
1510     /* Get the transaction for the latest completed event. */
1511     trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
1512     if (!trans)
1513         return NULL;
1514 
1515     /* For RX channels, update each completed transaction with the number
1516      * of bytes that were actually received.  For TX channels, report
1517      * the number of transactions and bytes this completion represents
1518      * up the network stack.
1519      */
1520     gsi_evt_ring_update(gsi, evt_ring_id, index);
1521 
1522     return gsi_channel_trans_complete(channel);
1523 }
1524 
1525 /**
1526  * gsi_channel_poll_one() - Return a single completed transaction on a channel
1527  * @channel:    Channel to be polled
1528  *
1529  * Return:  Transaction pointer, or null if none are available
1530  *
1531  * This function returns the first entry on a channel's completed transaction
1532  * list.  If that list is empty, the hardware is consulted to determine
1533  * whether any new transactions have completed.  If so, they're moved to the
1534  * completed list and the new first entry is returned.  If there are no more
1535  * completed transactions, a null pointer is returned.
1536  */
1537 static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
1538 {
1539     struct gsi_trans *trans;
1540 
1541     /* Get the first transaction from the completed list */
1542     trans = gsi_channel_trans_complete(channel);
1543     if (!trans) /* List is empty; see if there's more to do */
1544         trans = gsi_channel_update(channel);
1545 
1546     if (trans)
1547         gsi_trans_move_polled(trans);
1548 
1549     return trans;
1550 }
1551 
1552 /**
1553  * gsi_channel_poll() - NAPI poll function for a channel
1554  * @napi:   NAPI structure for the channel
1555  * @budget: Budget supplied by NAPI core
1556  *
1557  * Return:  Number of items polled (<= budget)
1558  *
1559  * Single transactions completed by hardware are polled until either
1560  * the budget is exhausted, or there are no more.  Each transaction
1561  * polled is passed to gsi_trans_complete(), to perform remaining
1562  * completion processing and retire/free the transaction.
1563  */
1564 static int gsi_channel_poll(struct napi_struct *napi, int budget)
1565 {
1566     struct gsi_channel *channel;
1567     int count;
1568 
1569     channel = container_of(napi, struct gsi_channel, napi);
1570     for (count = 0; count < budget; count++) {
1571         struct gsi_trans *trans;
1572 
1573         trans = gsi_channel_poll_one(channel);
1574         if (!trans)
1575             break;
1576         gsi_trans_complete(trans);
1577     }
1578 
1579     if (count < budget && napi_complete(napi))
1580         gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
1581 
1582     return count;
1583 }
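
/* Note on the NAPI contract above: returning count == budget tells
 * the core there may be more work, so polling continues with the
 * interrupt left disabled.  Only when count < budget is
 * napi_complete() called, and only if it succeeds is the channel's
 * IEOB (event ring) interrupt re-enabled so new completions can
 * interrupt again.
 */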
1584 
1585 /* The event bitmap represents which event ids are available for allocation.
1586  * Set bits are not available, clear bits can be used.  This function
1587  * initializes the map so all events supported by the hardware are available,
1588  * then precludes any reserved events from being allocated.
1589  */
1590 static u32 gsi_event_bitmap_init(u32 evt_ring_max)
1591 {
1592     u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
1593 
1594     event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
1595 
1596     return event_bitmap;
1597 }
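
/* For example, with evt_ring_max = 20 the initial map has bits 20-31
 * set (ids the hardware does not implement).  If the reserved MHI
 * range were ids 16-17 (illustrative values only), the second GENMASK
 * would mark those two ids busy as well; every remaining clear bit is
 * an allocatable event ring id.
 */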
1598 
1599 /* Setup function for a single channel */
1600 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
1601 {
1602     struct gsi_channel *channel = &gsi->channel[channel_id];
1603     u32 evt_ring_id = channel->evt_ring_id;
1604     int ret;
1605 
1606     if (!gsi_channel_initialized(channel))
1607         return 0;
1608 
1609     ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1610     if (ret)
1611         return ret;
1612 
1613     gsi_evt_ring_program(gsi, evt_ring_id);
1614 
1615     ret = gsi_channel_alloc_command(gsi, channel_id);
1616     if (ret)
1617         goto err_evt_ring_de_alloc;
1618 
1619     gsi_channel_program(channel, true);
1620 
1621     if (channel->toward_ipa)
1622         netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
1623                   gsi_channel_poll);
1624     else
1625         netif_napi_add(&gsi->dummy_dev, &channel->napi,
1626                    gsi_channel_poll, NAPI_POLL_WEIGHT);
1627 
1628     return 0;
1629 
1630 err_evt_ring_de_alloc:
1631     /* We've done nothing with the event ring yet so don't reset */
1632     gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1633 
1634     return ret;
1635 }
1636 
1637 /* Inverse of gsi_channel_setup_one() */
1638 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1639 {
1640     struct gsi_channel *channel = &gsi->channel[channel_id];
1641     u32 evt_ring_id = channel->evt_ring_id;
1642 
1643     if (!gsi_channel_initialized(channel))
1644         return;
1645 
1646     netif_napi_del(&channel->napi);
1647 
1648     gsi_channel_de_alloc_command(gsi, channel_id);
1649     gsi_evt_ring_reset_command(gsi, evt_ring_id);
1650     gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1651 }
1652 
1653 /* We use generic commands only to operate on modem channels.  We don't have
1654  * the ability to determine channel state for a modem channel, so we simply
1655  * issue the command and wait for it to complete.
1656  */
1657 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1658                    enum gsi_generic_cmd_opcode opcode,
1659                    u8 params)
1660 {
1661     bool timeout;
1662     u32 val;
1663 
1664     /* The error global interrupt type is always enabled (until we tear
1665      * down), so we will keep it enabled.
1666      *
1667      * A generic EE command completes with a GSI global interrupt of
1668      * type GP_INT1.  We only perform one generic command at a time
1669      * (to allocate, halt, or enable/disable flow control on a modem
1670      * channel), and only from this function.  So we enable the GP_INT1
1671      * IRQ type here, and disable it again after the command completes.
1672      */
1673     val = BIT(ERROR_INT) | BIT(GP_INT1);
1674     iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1675 
1676     /* First zero the result code field */
1677     val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1678     val &= ~GENERIC_EE_RESULT_FMASK;
1679     iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1680 
1681     /* Now issue the command */
1682     val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1683     val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1684     val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1685     val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
1686 
1687     timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
1688 
1689     /* Disable the GP_INT1 IRQ type again */
1690     iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1691 
1692     if (!timeout)
1693         return gsi->result;
1694 
1695     dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1696         opcode, channel_id);
1697 
1698     return -ETIMEDOUT;
1699 }
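
/* Note on the command flow above: gsi_command() writes the command
 * register and waits on gsi->completion.  The GP_INT1 interrupt
 * handler records the result code (from the scratch field zeroed
 * above) in gsi->result and then signals the completion, which is
 * why a non-timeout return can simply report gsi->result.
 */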
1700 
1701 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1702 {
1703     return gsi_generic_command(gsi, channel_id,
1704                    GSI_GENERIC_ALLOCATE_CHANNEL, 0);
1705 }
1706 
1707 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1708 {
1709     u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
1710     int ret;
1711 
1712     do
1713         ret = gsi_generic_command(gsi, channel_id,
1714                       GSI_GENERIC_HALT_CHANNEL, 0);
1715     while (ret == -EAGAIN && retries--);
1716 
1717     if (ret)
1718         dev_err(gsi->dev, "error %d halting modem channel %u\n",
1719             ret, channel_id);
1720 }
1721 
1722 /* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
1723 void
1724 gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
1725 {
1726     u32 retries = 0;
1727     u32 command;
1728     int ret;
1729 
1730     command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
1731              : GSI_GENERIC_DISABLE_FLOW_CONTROL;
1732     /* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
1733      * is underway.  In this case we need to retry the command.
1734      */
1735     if (!enable && gsi->version >= IPA_VERSION_4_11)
1736         retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
1737 
1738     do
1739         ret = gsi_generic_command(gsi, channel_id, command, 0);
1740     while (ret == -EAGAIN && retries--);
1741 
1742     if (ret)
1743         dev_err(gsi->dev,
1744             "error %d %sabling modem channel %u flow control\n",
1745             ret, enable ? "en" : "dis", channel_id);
1746 }
1747 
1748 /* Setup function for channels */
1749 static int gsi_channel_setup(struct gsi *gsi)
1750 {
1751     u32 channel_id = 0;
1752     u32 mask;
1753     int ret;
1754 
1755     gsi_irq_enable(gsi);
1756 
1757     mutex_lock(&gsi->mutex);
1758 
1759     do {
1760         ret = gsi_channel_setup_one(gsi, channel_id);
1761         if (ret)
1762             goto err_unwind;
1763     } while (++channel_id < gsi->channel_count);
1764 
1765     /* Make sure no channels were defined that hardware does not support */
1766     while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1767         struct gsi_channel *channel = &gsi->channel[channel_id++];
1768 
1769         if (!gsi_channel_initialized(channel))
1770             continue;
1771 
1772         ret = -EINVAL;
1773         dev_err(gsi->dev, "channel %u not supported by hardware\n",
1774             channel_id - 1);
1775         channel_id = gsi->channel_count;
1776         goto err_unwind;
1777     }
1778 
1779     /* Allocate modem channels if necessary */
1780     mask = gsi->modem_channel_bitmap;
1781     while (mask) {
1782         u32 modem_channel_id = __ffs(mask);
1783 
1784         ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1785         if (ret)
1786             goto err_unwind_modem;
1787 
1788         /* Clear bit from mask only after success (for unwind) */
1789         mask ^= BIT(modem_channel_id);
1790     }
1791 
1792     mutex_unlock(&gsi->mutex);
1793 
1794     return 0;
1795 
1796 err_unwind_modem:
1797     /* Compute which modem channels need to be deallocated */
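    /* Example: if modem_channel_bitmap is 0x18 (channels 3 and 4) and
     * allocation failed on channel 4, mask still holds 0x10; the XOR
     * below yields 0x08, so only channel 3, which was successfully
     * allocated, is halted during the unwind.
     */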
1798     mask ^= gsi->modem_channel_bitmap;
1799     while (mask) {
1800         channel_id = __fls(mask);
1801 
1802         mask ^= BIT(channel_id);
1803 
1804         gsi_modem_channel_halt(gsi, channel_id);
1805     }
1806 
1807 err_unwind:
1808     while (channel_id--)
1809         gsi_channel_teardown_one(gsi, channel_id);
1810 
1811     mutex_unlock(&gsi->mutex);
1812 
1813     gsi_irq_disable(gsi);
1814 
1815     return ret;
1816 }
1817 
1818 /* Inverse of gsi_channel_setup() */
1819 static void gsi_channel_teardown(struct gsi *gsi)
1820 {
1821     u32 mask = gsi->modem_channel_bitmap;
1822     u32 channel_id;
1823 
1824     mutex_lock(&gsi->mutex);
1825 
1826     while (mask) {
1827         channel_id = __fls(mask);
1828 
1829         mask ^= BIT(channel_id);
1830 
1831         gsi_modem_channel_halt(gsi, channel_id);
1832     }
1833 
1834     channel_id = gsi->channel_count - 1;
1835     do
1836         gsi_channel_teardown_one(gsi, channel_id);
1837     while (channel_id--);
1838 
1839     mutex_unlock(&gsi->mutex);
1840 
1841     gsi_irq_disable(gsi);
1842 }
1843 
1844 /* Turn off all GSI interrupts initially */
1845 static int gsi_irq_setup(struct gsi *gsi)
1846 {
1847     int ret;
1848 
1849     /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1850     iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1851 
1852     /* Disable all interrupt types */
1853     gsi_irq_type_update(gsi, 0);
1854 
1855     /* Clear all type-specific interrupt masks */
1856     iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
1857     iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
1858     iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1859     iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
1860 
1861     /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
1862     if (gsi->version > IPA_VERSION_3_1) {
1863         u32 offset;
1864 
1865         /* These registers are in the non-adjusted address range */
1866         offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
1867         iowrite32(0, gsi->virt_raw + offset);
1868         offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
1869         iowrite32(0, gsi->virt_raw + offset);
1870     }
1871 
1872     iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
1873 
1874     ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
1875     if (ret)
1876         dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
1877 
1878     return ret;
1879 }
1880 
1881 static void gsi_irq_teardown(struct gsi *gsi)
1882 {
1883     free_irq(gsi->irq, gsi);
1884 }
1885 
1886 /* Get # supported channel and event rings; there is no gsi_ring_teardown() */
1887 static int gsi_ring_setup(struct gsi *gsi)
1888 {
1889     struct device *dev = gsi->dev;
1890     u32 count;
1891     u32 val;
1892 
1893     if (gsi->version < IPA_VERSION_3_5_1) {
1894         /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
1895         gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1896         gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1897 
1898         return 0;
1899     }
1900 
1901     val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1902 
1903     count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1904     if (!count) {
1905         dev_err(dev, "GSI reports zero channels supported\n");
1906         return -EINVAL;
1907     }
1908     if (count > GSI_CHANNEL_COUNT_MAX) {
1909         dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
1910              GSI_CHANNEL_COUNT_MAX, count);
1911         count = GSI_CHANNEL_COUNT_MAX;
1912     }
1913     gsi->channel_count = count;
1914 
1915     count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1916     if (!count) {
1917         dev_err(dev, "GSI reports zero event rings supported\n");
1918         return -EINVAL;
1919     }
1920     if (count > GSI_EVT_RING_COUNT_MAX) {
1921         dev_warn(dev,
1922              "limiting to %u event rings; hardware supports %u\n",
1923              GSI_EVT_RING_COUNT_MAX, count);
1924         count = GSI_EVT_RING_COUNT_MAX;
1925     }
1926     gsi->evt_ring_count = count;
1927 
1928     return 0;
1929 }
1930 
1931 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
1932 int gsi_setup(struct gsi *gsi)
1933 {
1934     u32 val;
1935     int ret;
1936 
1937     /* Here is where we first touch the GSI hardware */
1938     val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1939     if (!(val & ENABLED_FMASK)) {
1940         dev_err(gsi->dev, "GSI has not been enabled\n");
1941         return -EIO;
1942     }
1943 
1944     ret = gsi_irq_setup(gsi);
1945     if (ret)
1946         return ret;
1947 
1948     ret = gsi_ring_setup(gsi);  /* No matching teardown required */
1949     if (ret)
1950         goto err_irq_teardown;
1951 
1952     /* Initialize the error log */
1953     iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1954 
1955     ret = gsi_channel_setup(gsi);
1956     if (ret)
1957         goto err_irq_teardown;
1958 
1959     return 0;
1960 
1961 err_irq_teardown:
1962     gsi_irq_teardown(gsi);
1963 
1964     return ret;
1965 }
1966 
1967 /* Inverse of gsi_setup() */
1968 void gsi_teardown(struct gsi *gsi)
1969 {
1970     gsi_channel_teardown(gsi);
1971     gsi_irq_teardown(gsi);
1972 }
1973 
1974 /* Initialize a channel's event ring */
1975 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1976 {
1977     struct gsi *gsi = channel->gsi;
1978     struct gsi_evt_ring *evt_ring;
1979     int ret;
1980 
1981     ret = gsi_evt_ring_id_alloc(gsi);
1982     if (ret < 0)
1983         return ret;
1984     channel->evt_ring_id = ret;
1985 
1986     evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1987     evt_ring->channel = channel;
1988 
1989     ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1990     if (!ret)
1991         return 0;   /* Success! */
1992 
1993     dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1994         ret, gsi_channel_id(channel));
1995 
1996     gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1997 
1998     return ret;
1999 }
2000 
2001 /* Inverse of gsi_channel_evt_ring_init() */
2002 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
2003 {
2004     u32 evt_ring_id = channel->evt_ring_id;
2005     struct gsi *gsi = channel->gsi;
2006     struct gsi_evt_ring *evt_ring;
2007 
2008     evt_ring = &gsi->evt_ring[evt_ring_id];
2009     gsi_ring_free(gsi, &evt_ring->ring);
2010     gsi_evt_ring_id_free(gsi, evt_ring_id);
2011 }
2012 
2013 static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
2014                    const struct ipa_gsi_endpoint_data *data)
2015 {
2016     const struct gsi_channel_data *channel_data;
2017     u32 channel_id = data->channel_id;
2018     struct device *dev = gsi->dev;
2019 
2020     /* Make sure channel ids are in the range driver supports */
2021     if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
2022         dev_err(dev, "bad channel id %u; must be less than %u\n",
2023             channel_id, GSI_CHANNEL_COUNT_MAX);
2024         return false;
2025     }
2026 
2027     if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
2028         dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
2029         return false;
2030     }
2031 
2032     if (command && !data->toward_ipa) {
2033         dev_err(dev, "command channel %u is not TX\n", channel_id);
2034         return false;
2035     }
2036 
2037     channel_data = &data->channel;
2038 
2039     if (!channel_data->tlv_count ||
2040         channel_data->tlv_count > GSI_TLV_MAX) {
2041         dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
2042             channel_id, channel_data->tlv_count, GSI_TLV_MAX);
2043         return false;
2044     }
2045 
2046     if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
2047         dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
2048             channel_id, IPA_COMMAND_TRANS_TRE_MAX,
2049             channel_data->tlv_count);
2050         return false;
2051     }
2052 
2053     /* We have to allow at least one maximally-sized transaction to
2054      * be outstanding (which would use tlv_count TREs).  Given how
2055      * gsi_channel_tre_max() is computed, tre_count has to be almost
2056      * twice the TLV FIFO size to satisfy this requirement.
2057      */
2058     if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
2059         dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
2060             channel_id, channel_data->tre_count,
2061             channel_data->tlv_count);
2062         return false;
2063     }
2064 
2065     if (!is_power_of_2(channel_data->tre_count)) {
2066         dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
2067             channel_id, channel_data->tre_count);
2068         return false;
2069     }
2070 
2071     if (!is_power_of_2(channel_data->event_count)) {
2072         dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
2073             channel_id, channel_data->event_count);
2074         return false;
2075     }
2076 
2077     return true;
2078 }
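
/* An illustrative configuration passing every check above (values are
 * hypothetical, not taken from any real platform): with tlv_count = 16,
 * tre_count must be at least 2 * 16 - 1 = 31, and the smallest power
 * of 2 satisfying that is 32; event_count must likewise be a power
 * of 2, for example 32.
 */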
2079 
2080 /* Init function for a single channel */
2081 static int gsi_channel_init_one(struct gsi *gsi,
2082                 const struct ipa_gsi_endpoint_data *data,
2083                 bool command)
2084 {
2085     struct gsi_channel *channel;
2086     u32 tre_count;
2087     int ret;
2088 
2089     if (!gsi_channel_data_valid(gsi, command, data))
2090         return -EINVAL;
2091 
2092     /* Worst case we need an event for every outstanding TRE */
2093     if (data->channel.tre_count > data->channel.event_count) {
2094         tre_count = data->channel.event_count;
2095         dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
2096              data->channel_id, tre_count);
2097     } else {
2098         tre_count = data->channel.tre_count;
2099     }
2100 
2101     channel = &gsi->channel[data->channel_id];
2102     memset(channel, 0, sizeof(*channel));
2103 
2104     channel->gsi = gsi;
2105     channel->toward_ipa = data->toward_ipa;
2106     channel->command = command;
2107     channel->trans_tre_max = data->channel.tlv_count;
2108     channel->tre_count = tre_count;
2109     channel->event_count = data->channel.event_count;
2110 
2111     ret = gsi_channel_evt_ring_init(channel);
2112     if (ret)
2113         goto err_clear_gsi;
2114 
2115     ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
2116     if (ret) {
2117         dev_err(gsi->dev, "error %d allocating channel %u ring\n",
2118             ret, data->channel_id);
2119         goto err_channel_evt_ring_exit;
2120     }
2121 
2122     ret = gsi_channel_trans_init(gsi, data->channel_id);
2123     if (ret)
2124         goto err_ring_free;
2125 
2126     if (command) {
2127         u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2128 
2129         ret = ipa_cmd_pool_init(channel, tre_max);
2130     }
2131     if (!ret)
2132         return 0;   /* Success! */
2133 
2134     gsi_channel_trans_exit(channel);
2135 err_ring_free:
2136     gsi_ring_free(gsi, &channel->tre_ring);
2137 err_channel_evt_ring_exit:
2138     gsi_channel_evt_ring_exit(channel);
2139 err_clear_gsi:
2140     channel->gsi = NULL;    /* Mark it not (fully) initialized */
2141 
2142     return ret;
2143 }
2144 
2145 /* Inverse of gsi_channel_init_one() */
2146 static void gsi_channel_exit_one(struct gsi_channel *channel)
2147 {
2148     if (!gsi_channel_initialized(channel))
2149         return;
2150 
2151     if (channel->command)
2152         ipa_cmd_pool_exit(channel);
2153     gsi_channel_trans_exit(channel);
2154     gsi_ring_free(channel->gsi, &channel->tre_ring);
2155     gsi_channel_evt_ring_exit(channel);
2156 }
2157 
2158 /* Init function for channels */
2159 static int gsi_channel_init(struct gsi *gsi, u32 count,
2160                 const struct ipa_gsi_endpoint_data *data)
2161 {
2162     bool modem_alloc;
2163     int ret = 0;
2164     u32 i;
2165 
2166     /* IPA v4.2 requires the AP to allocate channels for the modem */
2167     modem_alloc = gsi->version == IPA_VERSION_4_2;
2168 
2169     gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
2170     gsi->ieob_enabled_bitmap = 0;
2171 
2172     /* The endpoint data array is indexed by endpoint name */
2173     for (i = 0; i < count; i++) {
2174         bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
2175 
2176         if (ipa_gsi_endpoint_data_empty(&data[i]))
2177             continue;   /* Skip over empty slots */
2178 
2179         /* Mark modem channels to be allocated (hardware workaround) */
2180         if (data[i].ee_id == GSI_EE_MODEM) {
2181             if (modem_alloc)
2182                 gsi->modem_channel_bitmap |=
2183                         BIT(data[i].channel_id);
2184             continue;
2185         }
2186 
2187         ret = gsi_channel_init_one(gsi, &data[i], command);
2188         if (ret)
2189             goto err_unwind;
2190     }
2191 
2192     return ret;
2193 
2194 err_unwind:
2195     while (i--) {
2196         if (ipa_gsi_endpoint_data_empty(&data[i]))
2197             continue;
2198         if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2199             gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2200             continue;
2201         }
2202         gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2203     }
2204 
2205     return ret;
2206 }
2207 
2208 /* Inverse of gsi_channel_init() */
2209 static void gsi_channel_exit(struct gsi *gsi)
2210 {
2211     u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2212 
2213     do
2214         gsi_channel_exit_one(&gsi->channel[channel_id]);
2215     while (channel_id--);
2216     gsi->modem_channel_bitmap = 0;
2217 }
2218 
2219 /* Init function for GSI.  GSI hardware does not need to be "ready" */
2220 int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2221          enum ipa_version version, u32 count,
2222          const struct ipa_gsi_endpoint_data *data)
2223 {
2224     struct device *dev = &pdev->dev;
2225     struct resource *res;
2226     resource_size_t size;
2227     u32 adjust;
2228     int ret;
2229 
2230     gsi_validate_build();
2231 
2232     gsi->dev = dev;
2233     gsi->version = version;
2234 
2235     /* GSI uses NAPI on all channels.  Create a dummy network device
2236      * for the channel NAPI contexts to be associated with.
2237      */
2238     init_dummy_netdev(&gsi->dummy_dev);
2239 
2240     /* Get GSI memory range and map it */
2241     res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2242     if (!res) {
2243         dev_err(dev, "DT error getting \"gsi\" memory property\n");
2244         return -ENODEV;
2245     }
2246 
2247     size = resource_size(res);
2248     if (res->start > U32_MAX || size > U32_MAX - res->start) {
2249         dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2250         return -EINVAL;
2251     }
2252 
2253     /* Make sure we can make our pointer adjustment if necessary */
2254     adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
2255     if (res->start < adjust) {
2256         dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
2257             adjust);
2258         return -EINVAL;
2259     }
2260 
2261     gsi->virt_raw = ioremap(res->start, size);
2262     if (!gsi->virt_raw) {
2263         dev_err(dev, "unable to remap \"gsi\" memory\n");
2264         return -ENOMEM;
2265     }
2266     /* Most registers are accessed using an adjusted register range */
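    /* On IPA v4.5+ the EE registers sit GSI_EE_REG_ADJUST bytes lower
     * in the "gsi" region than the offsets in gsi_reg.h assume, so
     * lowering the base pointer once, rather than adjusting every
     * offset, lets the same offset macros serve all versions.  Offsets
     * used with gsi->virt are always at least "adjust", so no access
     * lands below the mapped region.
     */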
2267     gsi->virt = gsi->virt_raw - adjust;
2268 
2269     init_completion(&gsi->completion);
2270 
2271     ret = gsi_irq_init(gsi, pdev);  /* No matching exit required */
2272     if (ret)
2273         goto err_iounmap;
2274 
2275     ret = gsi_channel_init(gsi, count, data);
2276     if (ret)
2277         goto err_iounmap;
2278 
2279     mutex_init(&gsi->mutex);
2280 
2281     return 0;
2282 
2283 err_iounmap:
2284     iounmap(gsi->virt_raw);
2285 
2286     return ret;
2287 }
2288 
2289 /* Inverse of gsi_init() */
2290 void gsi_exit(struct gsi *gsi)
2291 {
2292     mutex_destroy(&gsi->mutex);
2293     gsi_channel_exit(gsi);
2294     iounmap(gsi->virt_raw);
2295 }
2296 
2297 /* The maximum number of outstanding TREs on a channel.  This limits
2298  * a channel's maximum number of transactions outstanding (worst case
2299  * is one TRE per transaction).
2300  *
2301  * The absolute limit is the number of TREs in the channel's TRE ring,
2302  * and in theory we should be able to use all of them.  But in practice,
2303  * doing that led to the hardware reporting exhaustion of event ring
2304  * slots for writing completion information.  So the hardware limit
2305  * would be (tre_count - 1).
2306  *
2307  * We reduce it a bit further though.  Transaction resource pools are
2308  * sized to be a little larger than this maximum, to allow resource
2309  * allocations to always be contiguous.  The number of entries in a
2310  * TRE ring buffer is a power of 2, and the extra resources in a pool
2311  * tend to nearly double the memory allocated for it.  Reducing the
2312  * maximum number of outstanding TREs allows the number of entries in
2313  * a pool to avoid crossing that power-of-2 boundary, and this can
2314  * substantially reduce pool memory requirements.  The number we
2315  * reduce it by matches the number added in gsi_trans_pool_init().
2316  */
2317 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2318 {
2319     struct gsi_channel *channel = &gsi->channel[channel_id];
2320 
2321     /* Hardware limit is channel->tre_count - 1 */
2322     return channel->tre_count - (channel->trans_tre_max - 1);
2323 }
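
/* Worked example: with tre_count = 256 and trans_tre_max = 16 (the
 * channel's TLV count), the result is 256 - (16 - 1) = 241.  When
 * gsi_trans_pool_init() adds those 15 entries back, the pool holds
 * exactly 256 entries, staying on the power-of-2 boundary instead of
 * spilling past it and nearly doubling the allocation.
 */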