// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2022 Linaro Ltd.
 */
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

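/* The Generic Software Interface (GSI) is the hardware interface the AP
 * uses to communicate with the IPA.  Each data path is implemented by a
 * channel: a ring of transfer ring elements (TREs) in AP memory that
 * describe transfers to perform.  The AP tells the hardware about newly
 * filled TREs by writing a channel doorbell register.
 *
 * Transfer completions are reported through an event ring associated
 * with each channel.  The hardware writes an event structure for each
 * completed TRE and signals the AP with an IEOB interrupt; the AP
 * acknowledges consumed events using the event ring doorbell.
 *
 * Channels and event rings are allocated, started, stopped, and reset
 * by issuing commands to the hardware and waiting for the resulting
 * state changes, which are signaled by interrupt.
 */
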
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10
#define GSI_CHANNEL_MODEM_FLOW_RETRIES	5	/* disable flow control only */

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

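/* An event ring entry, written by the hardware to report a completed
 * transfer.  The xfer_ptr field holds the bus address of the TRE the
 * event is associated with, len the number of bytes transferred, code
 * a completion code, and chid the ID of the channel involved.
 */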
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

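/* Scratch area layout for a GPI channel.  The hardware interprets
 * max_outstanding_tre as a byte count bounding how much TRE data may be
 * outstanding on the channel, and outstanding_threshold as the byte
 * threshold at which more TREs are fetched; both are assumed here to be
 * multiples of the ring element size (see gsi_channel_program()).
 */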
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/* The scratch area is written as four 32-bit registers, so overlay the
 * protocol-specific layout with a generic word-addressable view.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring elements are
	 * the same (and fixed).  Make sure the size of an event is what
	 * is expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  The element size must be
	 * a power of 2 for the ring arithmetic below to work.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel ID associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
	return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Event ring commands are performed one at a time.  Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled while we issue an event ring command.  Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time.  Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled while we issue a channel command.  Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first enabled ring */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled ring */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it happen.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}

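/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * within GSI_CMD_TIMEOUT milliseconds, false otherwise.
 */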
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
	struct completion *completion = &gsi->completion;

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	/* Get initial event ring state */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void __iomem *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate a GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* The channel could have entered STOPPED state since the last
	 * call if the stop command timed out.  We consider that success
	 * here: the channel is stopped.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in STOPPED or ERROR state */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

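/* Ring an event ring doorbell, reporting the last entry processed by the
 * AP.  The index value passed is the ring index of the first *unfilled*
 * event, so the doorbell is written with the address of the element just
 * before it (modulo the ring size).
 */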
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct gsi_ring *ring = &evt_ring->ring;
	size_t size;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	size = ring->count * GSI_RING_ELEMENT_SIZE;
	val = ev_r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = lower_32_bits(ring->addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
	val = upper_32_bits(ring->addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* moderation counter */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and MSI high and low address is 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware our "last processed" event */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	const struct list_head *list;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	/* There is a small chance a TX transaction got allocated just
	 * before we disabled transmits, so check for that.
	 */
	if (channel->toward_ipa) {
		list = &trans_info->alloc;
		if (!list_empty(list))
			goto done;
		list = &trans_info->committed;
		if (!list_empty(list))
			goto done;
		list = &trans_info->pending;
		if (!list_empty(list))
			goto done;
	}

	/* Otherwise (TX or RX) we want to wait for anything that
	 * has completed, or has been polled but not released yet.
	 */
	list = &trans_info->complete;
	if (!list_empty(list))
		goto done;
	list = &trans_info->polled;
	if (list_empty(list))
		list = NULL;
done:
	trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* We program all channels as GPI type/protocol */
	val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;	/* direction: TX */
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = lower_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
	val = upper_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* No need to use the doorbell engine starting at IPA v4.0 */
	if (gsi->version < IPA_VERSION_4_0 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch.  We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}
	/* All channels set DB_IN_BYTES starting at IPA v4.9 */
	if (gsi->version >= IPA_VERSION_4_9)
		val |= DB_IN_BYTES;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = channel->trans_tre_max *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the low-order 16 bits of the last scratch
	 * register.  The next sequence assumes those bits remain unchanged
	 * between the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (resume && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, false);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}

static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (suspend && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, false);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk, RX channels must be reset twice on
	 * IPA versions prior to v4.0.
	 */
	if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	/* Hardware assumes this is 0 following reset */
	channel->tre_ring.index = 0;
	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a started channel for suspend */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting it if stopped) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, true);
}

/* Prevent all GSI interrupts while suspended */
void gsi_suspend(struct gsi *gsi)
{
	disable_irq(gsi->irq);
}

/* Allow all GSI interrupts again when resuming */
void gsi_resume(struct gsi *gsi)
{
	enable_irq(gsi->irq);
}

/* Record the channel's cumulative byte/transaction counts at commit time */
void gsi_trans_tx_committed(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];

	channel->trans_count++;
	channel->byte_count += trans->len;

	trans->trans_count = channel->trans_count;
	trans->byte_count = channel->byte_count;
}

/* Report to the network stack what has been queued since the last report */
void gsi_trans_tx_queued(struct gsi_trans *trans)
{
	u32 channel_id = trans->channel_id;
	struct gsi *gsi = trans->gsi;
	struct gsi_channel *channel;
	u32 trans_count;
	u32 byte_count;

	channel = &gsi->channel[channel_id];

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}

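/**
 * gsi_trans_tx_completed() - Report completed TX transactions
 * @trans:	TX channel transaction that has completed
 *
 * At the time a transaction completes, all transactions committed on
 * the same channel before it must have completed too.  The difference
 * between the transaction's cumulative counters (recorded at commit
 * time) and the channel's completed counters therefore represents what
 * has completed since the last report.
 */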
static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
	u32 channel_id = trans->channel_id;
	struct gsi *gsi = trans->gsi;
	struct gsi_channel *channel;
	u32 trans_count;
	u32 byte_count;

	channel = &gsi->channel[channel_id];
	trans_count = trans->trans_count - channel->compl_trans_count;
	byte_count = trans->byte_count - channel->compl_byte_count;

	channel->compl_trans_count += trans_count;
	channel->compl_byte_count += byte_count;

	ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);

		channel_mask ^= BIT(channel_id);

		complete(&gsi->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		complete(&gsi->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&gsi->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* General-purpose interrupt 1 handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of GENERIC GSI
	 * commands, which we use to allocate, halt, and control flow
	 * on channels on the modem's behalf (see gsi_generic_command()).
	 *
	 * A HALT command can be issued for a channel that is not in
	 * STARTED state; the hardware then reports the result
	 * INCORRECT_CHANNEL_STATE, which we treat the same as SUCCESS
	 * because the channel is already halted.
	 *
	 * The RETRY result means the command should be reissued, which
	 * is reported to the caller as -EAGAIN.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_INCORRECT_CHANNEL_STATE:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Global EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event handler */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	gsi_irq_ieob_disable(gsi, event_mask);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

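/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler for interrupts signaled by the GSI layer.
 * Each pending interrupt type is dispatched to its own handler, and the
 * type register is re-read until no interrupt types remain pending (or
 * an interrupt flood is detected).
 */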
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0)
		return ret ? : -EINVAL;

	gsi->irq = ret;

	return 0;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *
gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
	u32 channel_id = event->chid;
	struct gsi_channel *channel;
	struct gsi_trans *trans;
	u32 tre_offset;
	u32 tre_index;

	channel = &gsi->channel[channel_id];
	if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
		return NULL;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	trans = gsi_channel_trans_mapped(channel, tre_index);

	if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
		return NULL;

	return trans;
}

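/**
 * gsi_evt_ring_update() - Update transaction state from hardware
 * @gsi:		GSI pointer
 * @evt_ring_id:	Event ring ID
 * @index:		Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received
 * into the buffer.  Every event has a transaction associated with it,
 * and here we update each transaction's state based on its event.  The
 * first of a set of sequential events is indicated by the ring's
 * current index; the last is indicated by @index.  Finally the event
 * ring doorbell is rung, telling the hardware the events are consumed.
 */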
static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_event *event_done;
	struct gsi_event *event;
	u32 event_avail;
	u32 old_index;

	/* Starting with the oldest un-processed event, determine which
	 * transaction each new event refers to, updating the transaction
	 * as we go.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);

	/* Compute the number of events in the ring before it wraps, so
	 * we know when to go back to the ring's base address.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		struct gsi_trans *trans;

		trans = gsi_event_trans(gsi, event);
		if (!trans)
			return;

		if (trans->direction == DMA_FROM_DEVICE)
			trans->len = __le16_to_cpu(event->len);
		else
			gsi_trans_tx_completed(trans);

		gsi_trans_move_complete(trans);

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
	} while (event != event_done);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	u32 size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to
	 * size.  The DMA address returned by dma_alloc_coherent() is
	 * aligned to at least the allocation size, which satisfies this.
	 */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;

	ring->addr = addr;
	ring->count = count;
	ring->index = 0;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move newly completed transactions to completed state */
static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.
	 * Note that index always refers to an entry *within* the event
	 * ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return NULL;

	/* Get the transaction for the latest completed event. */
	trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
	if (!trans)
		return NULL;

	/* For RX channels, update each completed transaction with the
	 * number of bytes that were actually received.  For TX channels
	 * associated with a network device, report to the network stack
	 * the number of transfers and bytes this completion represents.
	 */
	gsi_evt_ring_update(gsi, evt_ring_id, index);

	return gsi_channel_trans_complete(channel);
}

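/**
 * gsi_channel_poll_one() - Return a single completed transaction
 * @channel:	Channel to be polled
 *
 * Return the first transaction in the completed list, moving it to the
 * polled state.  If the completed list is empty, ask the hardware first
 * whether anything new has completed.  Returns a null pointer if there
 * is nothing to report.
 */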
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans)	/* List is empty; see if there's more to do */
		trans = gsi_channel_update(channel);

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

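/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by the NAPI core
 *
 * Return the number of transactions processed, which is at most the
 * supplied budget.  Completed transactions are finalized; if fewer than
 * the budget were processed, polling is complete and the channel's IEOB
 * interrupt is re-enabled.
 */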
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}

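/* The event bitmap records which event ring ids are in use.  Initialize
 * it by marking all ids above the maximum supported, plus the range
 * reserved for MHI event rings, as (permanently) allocated.
 */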
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!gsi_channel_initialized(channel))
		return 0;

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!gsi_channel_initialized(channel))
		return;

	netif_napi_del(&channel->napi);

	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

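/* Generic commands are used to operate on channels owned by the modem.
 * Completion is signaled by a global GP_INT1 interrupt, whose handler
 * records the result of the command in gsi->result before completing
 * gsi->completion (see gsi_isr_gp_int1()).
 */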
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode,
			       u8 params)
{
	bool timeout;
	u32 val;

	/* The error global interrupt type is always enabled (until we
	 * tear down), so we keep it enabled here.
	 *
	 * A generic EE command completes with a GSI global interrupt of
	 * type GP_INT1.  We only perform one generic command at a time,
	 * and only from this function.  So we enable the GP_INT1 IRQ
	 * type here, and disable it again after the command completes.
	 */
	val = BIT(ERROR_INT) | BIT(GP_INT1);
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
	val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);

	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);

	/* Disable the GP_INT1 IRQ type again */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	if (!timeout)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}

/* Halt a modem channel, retrying if the command reports -EAGAIN */
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL, 0);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Enable or disable flow control for a modem GSI TX channel */
void
gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
{
	u32 retries = 0;
	u32 command;
	int ret;

	command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
			 : GSI_GENERIC_DISABLE_FLOW_CONTROL;
	/* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
	 * is underway.  In this case we need to retry the command.
	 */
	if (!enable && gsi->version >= IPA_VERSION_4_11)
		retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;

	do
		ret = gsi_generic_command(gsi, channel_id, command, 0);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev,
			"error %d %sabling modem channel %u flow control\n",
			ret, enable ? "en" : "dis", channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!gsi_channel_initialized(channel))
			continue;

		ret = -EINVAL;
		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);

	return ret;
}

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
}

/* Turn off all GSI interrupts initially, then request the IRQ */
static int gsi_irq_setup(struct gsi *gsi)
{
	int ret;

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
	if (gsi->version > IPA_VERSION_3_1) {
		u32 offset;

		/* These registers are in the non-adjusted address range */
		offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
		iowrite32(0, gsi->virt_raw + offset);
		offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
		iowrite32(0, gsi->virt_raw + offset);
	}

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);

	ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
	if (ret)
		dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);

	return ret;
}

static void gsi_irq_teardown(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
static int gsi_ring_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 count;
	u32 val;

	if (gsi->version < IPA_VERSION_3_5_1) {
		/* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;

		return 0;
	}

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, count);
		count = GSI_CHANNEL_COUNT_MAX;
	}
	gsi->channel_count = count;

	count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, count);
		count = GSI_EVT_RING_COUNT_MAX;
	}
	gsi->evt_ring_count = count;

	return 0;
}

/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(gsi->dev, "GSI has not been enabled\n");
		return -EIO;
	}

	ret = gsi_irq_setup(gsi);
	if (ret)
		return ret;

	ret = gsi_ring_setup(gsi);	/* No matching teardown required */
	if (ret)
		goto err_irq_teardown;

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	ret = gsi_channel_setup(gsi);
	if (ret)
		goto err_irq_teardown;

	return 0;

err_irq_teardown:
	gsi_irq_teardown(gsi);

	return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
				   const struct ipa_gsi_endpoint_data *data)
{
	const struct gsi_channel_data *channel_data;
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (command && !data->toward_ipa) {
		dev_err(dev, "command channel %u is not TX\n", channel_id);
		return false;
	}

	channel_data = &data->channel;

	if (!channel_data->tlv_count ||
	    channel_data->tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, channel_data->tlv_count, GSI_TLV_MAX);
		return false;
	}

	if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
		dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
			channel_id, IPA_COMMAND_TRANS_TRE_MAX,
			channel_data->tlv_count);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs).  Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * double the TLV FIFO size to satisfy this requirement.
	 */
	if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
			channel_id, channel_data->tlv_count,
			channel_data->tre_count);
		return false;
	}

	if (!is_power_of_2(channel_data->tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, channel_data->tre_count);
		return false;
	}

	if (!is_power_of_2(channel_data->event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, channel_data->event_count);
		return false;
	}

	return true;
}

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, command, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->trans_tre_max = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!gsi_channel_initialized(channel))
		return;

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;
}

/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* GSI uses NAPI on all channels.  Create a dummy network device
	 * for the channel NAPI contexts to be associated with.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt_raw = ioremap(res->start, size);
	if (!gsi->virt_raw) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Most registers are accessed using an adjusted register range */
	gsi->virt = gsi->virt_raw - adjust;

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);	/* No matching exit required */
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_iounmap;

	mutex_init(&gsi->mutex);

	return 0;

err_iounmap:
	iounmap(gsi->virt_raw);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	iounmap(gsi->virt_raw);
}

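/* Return the maximum number of TREs that may be outstanding on a channel
 * at one time.  Reserving trans_tre_max - 1 ring entries here ensures
 * that a maximally-sized transaction (up to trans_tre_max TREs) can
 * always be committed once this many TREs' worth of space is available.
 */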
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->trans_tre_max - 1);
}