// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"

/*
 * PIO Access Area
 */

#define pio_reg_read(r)		readl(hci->PIO_regs + (PIO_##r))
#define pio_reg_write(r, v)	writel(v, hci->PIO_regs + (PIO_##r))

#define PIO_COMMAND_QUEUE_PORT		0x00
#define PIO_RESPONSE_QUEUE_PORT		0x04
#define PIO_XFER_DATA_PORT		0x08
#define PIO_IBI_PORT			0x0c

#define PIO_QUEUE_THLD_CTRL		0x10
#define QUEUE_IBI_STATUS_THLD		GENMASK(31, 24)
#define QUEUE_IBI_DATA_THLD		GENMASK(23, 16)
#define QUEUE_RESP_BUF_THLD		GENMASK(15, 8)
#define QUEUE_CMD_EMPTY_BUF_THLD	GENMASK(7, 0)

#define PIO_DATA_BUFFER_THLD_CTRL	0x14
#define DATA_RX_START_THLD		GENMASK(26, 24)
#define DATA_TX_START_THLD		GENMASK(18, 16)
#define DATA_RX_BUF_THLD		GENMASK(10, 8)
#define DATA_TX_BUF_THLD		GENMASK(2, 0)

#define PIO_QUEUE_SIZE			0x18
#define TX_DATA_BUFFER_SIZE		GENMASK(31, 24)
#define RX_DATA_BUFFER_SIZE		GENMASK(23, 16)
#define IBI_STATUS_SIZE			GENMASK(15, 8)
#define CR_QUEUE_SIZE			GENMASK(7, 0)

#define PIO_INTR_STATUS			0x20
#define PIO_INTR_STATUS_ENABLE		0x24
#define PIO_INTR_SIGNAL_ENABLE		0x28
#define PIO_INTR_FORCE			0x2c
#define STAT_TRANSFER_BLOCKED		BIT(25)
#define STAT_PERR_RESP_UFLOW		BIT(24)
#define STAT_PERR_CMD_OFLOW		BIT(23)
#define STAT_PERR_IBI_UFLOW		BIT(22)
#define STAT_PERR_RX_UFLOW		BIT(21)
#define STAT_PERR_TX_OFLOW		BIT(20)
#define STAT_ERR_RESP_QUEUE_FULL	BIT(19)
#define STAT_WARN_RESP_QUEUE_FULL	BIT(18)
#define STAT_ERR_IBI_QUEUE_FULL		BIT(17)
#define STAT_WARN_IBI_QUEUE_FULL	BIT(16)
#define STAT_ERR_RX_DATA_FULL		BIT(15)
#define STAT_WARN_RX_DATA_FULL		BIT(14)
#define STAT_ERR_TX_DATA_EMPTY		BIT(13)
#define STAT_WARN_TX_DATA_EMPTY		BIT(12)
#define STAT_TRANSFER_ERR		BIT(9)
#define STAT_WARN_INS_STOP_MODE		BIT(7)
#define STAT_TRANSFER_ABORT		BIT(5)
#define STAT_RESP_READY			BIT(4)
#define STAT_CMD_QUEUE_READY		BIT(3)
#define STAT_IBI_STATUS_THLD		BIT(2)
#define STAT_RX_THLD			BIT(1)
#define STAT_TX_THLD			BIT(0)

#define PIO_QUEUE_CUR_STATUS		0x38
#define CUR_IBI_Q_LEVEL			GENMASK(28, 20)
#define CUR_RESP_Q_LEVEL		GENMASK(18, 10)
#define CUR_CMD_Q_EMPTY_LEVEL		GENMASK(8, 0)

#define PIO_DATA_BUFFER_CUR_STATUS	0x3c
#define CUR_RX_BUF_LVL			GENMASK(26, 16)
#define CUR_TX_BUF_LVL			GENMASK(10, 0)

/*
 * Useful combinations of the status bits defined above.
 */

#define STAT_LATENCY_WARNINGS		(STAT_WARN_RESP_QUEUE_FULL | \
					 STAT_WARN_IBI_QUEUE_FULL | \
					 STAT_WARN_RX_DATA_FULL | \
					 STAT_WARN_TX_DATA_EMPTY | \
					 STAT_WARN_INS_STOP_MODE)

#define STAT_LATENCY_ERRORS		(STAT_ERR_RESP_QUEUE_FULL | \
					 STAT_ERR_IBI_QUEUE_FULL | \
					 STAT_ERR_RX_DATA_FULL | \
					 STAT_ERR_TX_DATA_EMPTY)

#define STAT_PROG_ERRORS		(STAT_TRANSFER_BLOCKED | \
					 STAT_PERR_RESP_UFLOW | \
					 STAT_PERR_CMD_OFLOW | \
					 STAT_PERR_IBI_UFLOW | \
					 STAT_PERR_RX_UFLOW | \
					 STAT_PERR_TX_OFLOW)

#define STAT_ALL_ERRORS			(STAT_TRANSFER_ABORT | \
					 STAT_TRANSFER_ERR | \
					 STAT_LATENCY_ERRORS | \
					 STAT_PROG_ERRORS)
struct hci_pio_dev_ibi_data {
	struct i3c_generic_ibi_pool *pool;
	unsigned int max_len;
};

struct hci_pio_ibi_data {
	struct i3c_ibi_slot *slot;
	void *data_ptr;
	unsigned int addr;
	unsigned int seg_len, seg_cnt;
	unsigned int max_len;
	bool last_seg;
};

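/*
 * Driver-private PIO state. Four singly-linked software queues track each
 * transfer through its life cycle: command submission, RX or TX data
 * movement, and response retrieval. The in-progress IBI state is kept in
 * "ibi". All of it is protected by the lock and serviced from the
 * interrupt handler.
 */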
struct hci_pio_data {
	spinlock_t lock;
	struct hci_xfer *curr_xfer, *xfer_queue;
	struct hci_xfer *curr_rx, *rx_queue;
	struct hci_xfer *curr_tx, *tx_queue;
	struct hci_xfer *curr_resp, *resp_queue;
	struct hci_pio_ibi_data ibi;
	unsigned int rx_thresh_size, tx_thresh_size;
	unsigned int max_ibi_thresh;
	u32 reg_queue_thresh;
	u32 enabled_irqs;
};

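/*
 * Discover the hardware FIFO sizes and program the various thresholds.
 * Interrupt signaling is turned on on demand as transfers are queued;
 * only error conditions are part of the enabled set from the start.
 */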
static int hci_pio_init(struct i3c_hci *hci)
{
	struct hci_pio_data *pio;
	u32 val, size_val, rx_thresh, tx_thresh, ibi_val;

	pio = kzalloc(sizeof(*pio), GFP_KERNEL);
	if (!pio)
		return -ENOMEM;

	hci->io_data = pio;
	spin_lock_init(&pio->lock);

	size_val = pio_reg_read(QUEUE_SIZE);
	dev_info(&hci->master.dev, "CMD/RESP FIFO = %ld entries\n",
		 FIELD_GET(CR_QUEUE_SIZE, size_val));
	dev_info(&hci->master.dev, "IBI FIFO = %ld bytes\n",
		 4 * FIELD_GET(IBI_STATUS_SIZE, size_val));
	dev_info(&hci->master.dev, "RX data FIFO = %d bytes\n",
		 4 * (2 << FIELD_GET(RX_DATA_BUFFER_SIZE, size_val)));
	dev_info(&hci->master.dev, "TX data FIFO = %d bytes\n",
		 4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val)));

	/*
	 * Set the RX and TX data buffer thresholds to half the FIFO size.
	 * The threshold field encoding differs between HCI v1 and later
	 * versions, hence the two cases below.
	 */
	rx_thresh = FIELD_GET(RX_DATA_BUFFER_SIZE, size_val);
	tx_thresh = FIELD_GET(TX_DATA_BUFFER_SIZE, size_val);
	if (hci->version_major == 1) {
		/* those are expressed as 2^[n+1], so just sub 1 if not 0 */
		if (rx_thresh)
			rx_thresh -= 1;
		if (tx_thresh)
			tx_thresh -= 1;
		pio->rx_thresh_size = 2 << rx_thresh;
		pio->tx_thresh_size = 2 << tx_thresh;
	} else {
		/* with later versions the threshold is simply 2^n words */
		pio->rx_thresh_size = 1 << rx_thresh;
		pio->tx_thresh_size = 1 << tx_thresh;
	}
	val = FIELD_PREP(DATA_RX_BUF_THLD, rx_thresh) |
	      FIELD_PREP(DATA_TX_BUF_THLD, tx_thresh);
	pio_reg_write(DATA_BUFFER_THLD_CTRL, val);

	/*
	 * Interrupt on every IBI status entry and every available response.
	 * The dynamic IBI data threshold is capped at half the IBI status
	 * FIFO size, within the 1..63 range the register field allows.
	 */
	ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
	pio->max_ibi_thresh = clamp_val(ibi_val / 2, 1, 63);
	val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
	      FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
	      FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
	      FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
	pio_reg_write(QUEUE_THLD_CTRL, val);
	pio->reg_queue_thresh = val;

	/* disable all IRQs but allow all status bits */
	pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
	pio_reg_write(INTR_STATUS_ENABLE, 0xffffffff);

	/* always accept error interrupts (will be activated on first xfer) */
	pio->enabled_irqs = STAT_ALL_ERRORS;

	return 0;
}

static void hci_pio_cleanup(struct i3c_hci *hci)
{
	struct hci_pio_data *pio = hci->io_data;

	pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);

	if (pio) {
		DBG("status = %#x/%#x",
		    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
		BUG_ON(pio->curr_xfer);
		BUG_ON(pio->curr_rx);
		BUG_ON(pio->curr_tx);
		BUG_ON(pio->curr_resp);
		kfree(pio);
		hci->io_data = NULL;
	}
}

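/*
 * Push one command descriptor into the command FIFO: two DWORDs with the
 * v1 command format, four DWORDs with the v2 format.
 */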
static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
{
	DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
	DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
	pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
	pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
	if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
		DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
		DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
		pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
		pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
	}
}

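/*
 * Drain full words from the RX FIFO into the current RX xfer buffer, one
 * threshold batch at a time. Returns true when the buffer is completely
 * filled; false means we must wait for the RX threshold interrupt again,
 * either for more data or for the response that carries any trailing
 * partial word.
 */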
static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_rx;
	unsigned int nr_words;
	u32 *p;

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	while (xfer->data_left >= 4) {
		/* bail out if FIFO hasn't reached the threshold value yet */
		if (!(pio_reg_read(INTR_STATUS) & STAT_RX_THLD))
			return false;
		nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
		/* extract the data from the RX FIFO */
		xfer->data_left -= nr_words * 4;
		DBG("now %d left %d", nr_words * 4, xfer->data_left);
		while (nr_words--)
			*p++ = pio_reg_read(XFER_DATA_PORT);
	}

	/* trailing data is retrieved upon response reception */
	return !xfer->data_left;
}

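/*
 * Retrieve the last RX bytes of the current xfer once the response has
 * told us how much data actually arrived. The final partial word is
 * stored byte by byte so we never write past the destination buffer.
 */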
static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
				   struct hci_pio_data *pio, unsigned int count)
{
	struct hci_xfer *xfer = pio->curr_rx;
	u32 *p;

	DBG("%d remaining", count);

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	if (count >= 4) {
		unsigned int nr_words = count / 4;

		xfer->data_left -= nr_words * 4;
		DBG("now %d left %d", nr_words * 4, xfer->data_left);
		while (nr_words--)
			*p++ = pio_reg_read(XFER_DATA_PORT);
	}

	count &= 3;
	if (count) {
		/*
		 * There are trailing bytes in the last word.
		 * Fetch it and extract bytes in an endian independent way.
		 * Unlike the TX case, we must not write memory past the
		 * end of the destination buffer.
		 */
		u8 *p_byte = (u8 *)p;
		u32 data = pio_reg_read(XFER_DATA_PORT);

		xfer->data_word_before_partial = data;
		xfer->data_left -= count;
		data = (__force u32) cpu_to_le32(data);
		while (count--) {
			*p_byte++ = data;
			data >>= 8;
		}
	}
}

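/*
 * Feed the TX FIFO from the current TX xfer buffer in threshold-sized
 * batches. Returns false if we must wait for more FIFO space (the TX
 * threshold interrupt), true once all data for this xfer has been pushed.
 */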
static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_tx;
	unsigned int nr_words;
	u32 *p;

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	while (xfer->data_left >= 4) {
		/* bail out if FIFO free space is below the TX threshold */
		if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
			return false;
		/* we can fill up to that TX threshold */
		nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
		/* push data into the FIFO */
		xfer->data_left -= nr_words * 4;
		DBG("now %d left %d", nr_words * 4, xfer->data_left);
		while (nr_words--)
			pio_reg_write(XFER_DATA_PORT, *p++);
	}

	if (xfer->data_left) {
		/*
		 * There are trailing bytes to send. We can simply load
		 * them from memory as a word which will keep those bytes
		 * in their proper place even on a BE system. This will
		 * also read some bytes past the actual buffer, but no one
		 * should care as they won't be transmitted.
		 */
		if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
			return false;
		DBG("trailing %d", xfer->data_left);
		pio_reg_write(XFER_DATA_PORT, *p);
		xfer->data_left = 0;
	}

	return true;
}

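/*
 * Service as many queued RX (respectively TX, below) transfers as the
 * FIFO currently allows, advancing the current pointer as each one
 * completes. A true return value means the data queue is empty and the
 * corresponding threshold interrupt may be turned off.
 */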
static bool hci_pio_process_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_rx && hci_pio_do_rx(hci, pio))
		pio->curr_rx = pio->curr_rx->next_data;
	return !pio->curr_rx;
}

static bool hci_pio_process_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_tx && hci_pio_do_tx(hci, pio))
		pio->curr_tx = pio->curr_tx->next_data;
	return !pio->curr_tx;
}

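/*
 * Add the current xfer's data buffer to the RX or TX software queue, and
 * start servicing the FIFO right away if that queue was idle. The
 * matching threshold interrupt is enabled only when the FIFO can't be
 * fully serviced synchronously.
 */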
static void hci_pio_queue_data(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_xfer;
	struct hci_xfer *prev_queue_tail;

	if (!xfer->data) {
		xfer->data_len = xfer->data_left = 0;
		return;
	}

	if (xfer->rnw) {
		prev_queue_tail = pio->rx_queue;
		pio->rx_queue = xfer;
		if (pio->curr_rx) {
			prev_queue_tail->next_data = xfer;
		} else {
			pio->curr_rx = xfer;
			if (!hci_pio_process_rx(hci, pio))
				pio->enabled_irqs |= STAT_RX_THLD;
		}
	} else {
		prev_queue_tail = pio->tx_queue;
		pio->tx_queue = xfer;
		if (pio->curr_tx) {
			prev_queue_tail->next_data = xfer;
		} else {
			pio->curr_tx = xfer;
			if (!hci_pio_process_tx(hci, pio))
				pio->enabled_irqs |= STAT_TX_THLD;
		}
	}
}

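/*
 * The RX FIFO is drained in threshold batches, so we may have consumed
 * words that actually belong to the next queued RX transfers. Shift the
 * surplus (everything past words_to_keep) into the following xfer
 * buffers, recursing when the overflow spans more than one of them.
 */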
static void hci_pio_push_to_next_rx(struct i3c_hci *hci, struct hci_xfer *xfer,
				    unsigned int words_to_keep)
{
	u32 *from = xfer->data;
	u32 from_last;
	unsigned int received, count;

	received = (xfer->data_len - xfer->data_left) / 4;
	if ((xfer->data_len - xfer->data_left) & 3) {
		from_last = xfer->data_word_before_partial;
		received += 1;
	} else {
		from_last = from[received];
	}
	from += words_to_keep;
	count = received - words_to_keep;

	while (count) {
		unsigned int room, left, chunk, bytes_to_move;
		u32 last_word;

		xfer = xfer->next_data;
		if (!xfer) {
			dev_err(&hci->master.dev, "pushing RX data to nonexistent xfer\n");
			return;
		}

		room = DIV_ROUND_UP(xfer->data_len, 4);
		left = DIV_ROUND_UP(xfer->data_left, 4);
		chunk = min(count, room);
		if (chunk > left) {
			hci_pio_push_to_next_rx(hci, xfer, chunk - left);
			left = chunk;
			xfer->data_left = left * 4;
		}

		bytes_to_move = xfer->data_len - xfer->data_left;
		if (bytes_to_move & 3) {
			/* preserve word to become partial */
			u32 *p = xfer->data;

			xfer->data_word_before_partial = p[bytes_to_move / 4];
		}
		memmove(xfer->data + chunk, xfer->data, bytes_to_move);

		/* treat last word specially because of partial word issues */
		chunk -= 1;

		memcpy(xfer->data, from, chunk * 4);
		xfer->data_left -= chunk * 4;
		from += chunk;
		count -= chunk;

		last_word = (count == 1) ? from_last : *from++;
		if (xfer->data_left < 4) {
			/*
			 * Like in hci_pio_do_trailing_rx(), preserve the
			 * full word and store only the bytes that fit in
			 * the remaining buffer space.
			 */
			u8 *p_byte = xfer->data;

			p_byte += chunk * 4;
			xfer->data_word_before_partial = last_word;
			last_word = (__force u32) cpu_to_le32(last_word);
			while (xfer->data_left--) {
				*p_byte++ = last_word;
				last_word >>= 8;
			}
		} else {
			u32 *p = xfer->data;

			p[chunk] = last_word;
			xfer->data_left -= 4;
		}
		count--;
	}
}

static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
			u32 status);

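/*
 * Consume response descriptors as they become available, match them
 * against the expected transaction ID, reconcile actual versus expected
 * RX data lengths, and complete the corresponding transfers.
 */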
static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_resp &&
	       (pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
		struct hci_xfer *xfer = pio->curr_resp;
		u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
		unsigned int tid = RESP_TID(resp);

		DBG("resp = 0x%08x", resp);
		if (tid != xfer->cmd_tid) {
			dev_err(&hci->master.dev,
				"response tid=%d when expecting %d\n",
				tid, xfer->cmd_tid);
			/* let's pretend it is a prog error... any of them */
			hci_pio_err(hci, pio, STAT_PROG_ERRORS);
			return false;
		}
		xfer->response = resp;

		if (pio->curr_rx == xfer) {
			/*
			 * Response availability implies RX completion.
			 * Retrieve trailing RX data if any. Note that
			 * short reads are possible.
			 */
			unsigned int received, expected, to_keep;

			received = xfer->data_len - xfer->data_left;
			expected = RESP_DATA_LENGTH(xfer->response);
			if (expected > received) {
				hci_pio_do_trailing_rx(hci, pio,
						       expected - received);
			} else if (received > expected) {
				/* we consumed data meant for next xfer */
				to_keep = DIV_ROUND_UP(expected, 4);
				hci_pio_push_to_next_rx(hci, xfer, to_keep);
			}

			/* then process the RX list pointer */
			if (hci_pio_process_rx(hci, pio))
				pio->enabled_irqs &= ~STAT_RX_THLD;
		}

		/*
		 * We're about to give back ownership of the xfer structure
		 * to the waiting instance. Make sure no data or RX/TX
		 * reference to it is still kept around.
		 */
		if (pio->curr_rx == xfer) {
			DBG("short RX ?");
			pio->curr_rx = pio->curr_rx->next_data;
		} else if (pio->curr_tx == xfer) {
			DBG("short TX ?");
			pio->curr_tx = pio->curr_tx->next_data;
		} else if (xfer->data_left) {
			DBG("PIO xfer count = %d after response",
			    xfer->data_left);
		}

		pio->curr_resp = xfer->next_resp;
		if (xfer->completion)
			complete(xfer->completion);
	}
	return !pio->curr_resp;
}

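/*
 * Queue the current xfer on the response list, unless no response is
 * expected, i.e. Response On Completion (ROC) is not requested in the
 * command descriptor.
 */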
static void hci_pio_queue_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_xfer;
	struct hci_xfer *prev_queue_tail;

	if (!(xfer->cmd_desc[0] & CMD_0_ROC))
		return;

	prev_queue_tail = pio->resp_queue;
	pio->resp_queue = xfer;
	if (pio->curr_resp) {
		prev_queue_tail->next_resp = xfer;
	} else {
		pio->curr_resp = xfer;
		if (!hci_pio_process_resp(hci, pio))
			pio->enabled_irqs |= STAT_RESP_READY;
	}
}

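/*
 * Submit queued transfers to the command FIFO while there is room,
 * setting up data and response bookkeeping before each command goes out.
 * Returns true once the whole xfer queue has been submitted.
 */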
static bool hci_pio_process_cmd(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_xfer &&
	       (pio_reg_read(INTR_STATUS) & STAT_CMD_QUEUE_READY)) {
		/*
		 * Always process the data FIFO before sending the command
		 * so needed TX data or RX space is guaranteed to be
		 * available beforehand.
		 */
		hci_pio_queue_data(hci, pio);
		/*
		 * Then queue our response request. This may also process
		 * the response FIFO right away if a response is already
		 * pending there.
		 */
		hci_pio_queue_resp(hci, pio);
		/*
		 * Push the command descriptor into the command FIFO.
		 */
		hci_pio_write_cmd(hci, pio->curr_xfer);
		/*
		 * And move on to the next command.
		 */
		pio->curr_xfer = pio->curr_xfer->next_xfer;
	}
	return !pio->curr_xfer;
}

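/*
 * Transfer submission entry point: link the xfer array into the software
 * queue and kick command processing if the queue was idle, enabling the
 * command-ready interrupt when the command FIFO fills up first.
 */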
static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
	struct hci_pio_data *pio = hci->io_data;
	struct hci_xfer *prev_queue_tail;
	int i;

	DBG("n = %d", n);

	/* link xfer instances together and initialize data count */
	for (i = 0; i < n; i++) {
		xfer[i].next_xfer = (i + 1 < n) ? &xfer[i + 1] : NULL;
		xfer[i].next_data = NULL;
		xfer[i].next_resp = NULL;
		xfer[i].data_left = xfer[i].data_len;
	}

	spin_lock_irq(&pio->lock);
	prev_queue_tail = pio->xfer_queue;
	pio->xfer_queue = &xfer[n - 1];
	if (pio->curr_xfer) {
		prev_queue_tail->next_xfer = xfer;
	} else {
		pio->curr_xfer = xfer;
		if (!hci_pio_process_cmd(hci, pio))
			pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
		pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
		DBG("status = %#x/%#x",
		    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
	}
	spin_unlock_irq(&pio->lock);
	return 0;
}

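/*
 * Remove transfers from the software queues, e.g. on timeout. Returns
 * true on success. If the hardware is still referencing some of them,
 * everything still queued is completed with an HC_TERMINATED error
 * instead.
 */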
static bool hci_pio_dequeue_xfer_common(struct i3c_hci *hci,
					struct hci_pio_data *pio,
					struct hci_xfer *xfer, int n)
{
	struct hci_xfer *p, **p_prev_next;
	int i;

	/*
	 * To dequeue an xfer safely, it must be either fully processed or
	 * not submitted to the hardware at all. If any of the xfers to be
	 * dequeued is still reachable from the response, RX or TX lists,
	 * its command has been sent and the hardware state can no longer
	 * be trusted.
	 */
	for (p = pio->curr_resp; p; p = p->next_resp)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;
	for (p = pio->curr_rx; p; p = p->next_data)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;
	for (p = pio->curr_tx; p; p = p->next_data)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;

	/*
	 * If we get here, the xfers were either completed already or never
	 * submitted. Unlink them from the command queue if they are still
	 * there.
	 */
	p_prev_next = &pio->curr_xfer;
	for (p = pio->curr_xfer; p; p = p->next_xfer) {
		if (p == &xfer[0]) {
			*p_prev_next = xfer[n - 1].next_xfer;
			break;
		}
		p_prev_next = &p->next_xfer;
	}

	/* return true if we actually unqueued something */
	return !!p;

pio_screwed:
	/*
	 * The hardware still references some of the xfers being dequeued.
	 * Complete everything still queued with an HC_TERMINATED error
	 * and drop all references.
	 */
	for (p = pio->curr_resp; p; p = p->next_resp) {
		p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
		if (p->completion)
			complete(p->completion);
	}
	for (p = pio->curr_xfer; p; p = p->next_xfer) {
		p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
		if (p->completion)
			complete(p->completion);
	}
	pio->curr_xfer = pio->curr_rx = pio->curr_tx = pio->curr_resp = NULL;

	return true;
}

static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
	struct hci_pio_data *pio = hci->io_data;
	bool ret;

	spin_lock_irq(&pio->lock);
	DBG("n=%d status=%#x/%#x", n,
	    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
	DBG("main_status = %#x/%#x",
	    readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));

	ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
	spin_unlock_irq(&pio->lock);
	return ret;
}

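/*
 * Error recovery: log whatever state can be dumped, terminate every
 * transfer that might be affected, then reset and resume the PIO
 * hardware.
 */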
static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
			u32 status)
{
	/* TODO: this ought to be more sophisticated eventually */

	if (pio_reg_read(INTR_STATUS) & STAT_RESP_READY) {
		/* this may happen when an error is signaled with ROC unset */
		u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);

		dev_err(&hci->master.dev,
			"orphan response (%#x) on error\n", resp);
	}

	/* dump states on programming errors */
	if (status & STAT_PROG_ERRORS) {
		u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
		u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);

		dev_err(&hci->master.dev,
			"prog error %#lx (C/R/I = %ld/%ld/%ld, TX/RX = %ld/%ld)\n",
			status & STAT_PROG_ERRORS,
			FIELD_GET(CUR_CMD_Q_EMPTY_LEVEL, queue),
			FIELD_GET(CUR_RESP_Q_LEVEL, queue),
			FIELD_GET(CUR_IBI_Q_LEVEL, queue),
			FIELD_GET(CUR_TX_BUF_LVL, data),
			FIELD_GET(CUR_RX_BUF_LVL, data));
	}

	/* just bust out everything with pending responses for now */
	hci_pio_dequeue_xfer_common(hci, pio, pio->curr_resp, 1);
	/* ... and half-way TX transfers if any */
	if (pio->curr_tx && pio->curr_tx->data_left != pio->curr_tx->data_len)
		hci_pio_dequeue_xfer_common(hci, pio, pio->curr_tx, 1);
	/* then reset the hardware */
	mipi_i3c_hci_pio_reset(hci);
	mipi_i3c_hci_resume(hci);

	DBG("status=%#x/%#x",
	    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}

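/*
 * Update the IBI status threshold, avoiding the register write when the
 * desired value is already programmed (a cached copy is kept for this
 * purpose).
 */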
static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
				   struct hci_pio_data *pio,
				   unsigned int thresh_val)
{
	u32 regval = pio->reg_queue_thresh;

	regval &= ~QUEUE_IBI_STATUS_THLD;
	regval |= FIELD_PREP(QUEUE_IBI_STATUS_THLD, thresh_val);
	/* write the threshold reg only if it changes */
	if (regval != pio->reg_queue_thresh) {
		pio_reg_write(QUEUE_THLD_CTRL, regval);
		pio->reg_queue_thresh = regval;
		DBG("%d", thresh_val);
	}
}

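/*
 * Pull the current IBI data segment out of the IBI port. The IBI queue
 * threshold is adjusted to the amount of data still expected so an
 * interrupt fires when it is available. Returns false if we must wait
 * for more data, true once the segment is complete.
 */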
static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
				    struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	unsigned int nr_words, thresh_val;
	u32 *p;

	p = ibi->data_ptr;
	p += (ibi->seg_len - ibi->seg_cnt) / 4;

	while ((nr_words = ibi->seg_cnt / 4)) {
		/* determine our IBI queue threshold value */
		thresh_val = min(nr_words, pio->max_ibi_thresh);
		hci_pio_set_ibi_thresh(hci, pio, thresh_val);
		/* bail out if we don't have that amount of data ready */
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		/* extract the data from the IBI port */
		nr_words = thresh_val;
		ibi->seg_cnt -= nr_words * 4;
		DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
		while (nr_words--)
			*p++ = pio_reg_read(IBI_PORT);
	}

	if (ibi->seg_cnt) {
		/*
		 * There are trailing bytes in the last word.
		 * Fetch it and extract bytes in an endian independent way.
		 * Unlike the TX case, we must not write past the end of
		 * the destination buffer.
		 */
		u32 data;
		u8 *p_byte = (u8 *)p;

		hci_pio_set_ibi_thresh(hci, pio, 1);
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		DBG("trailing %d", ibi->seg_cnt);
		data = pio_reg_read(IBI_PORT);
		data = (__force u32) cpu_to_le32(data);
		while (ibi->seg_cnt--) {
			*p_byte++ = data;
			data >>= 8;
		}
	}

	return true;
}

static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_pio_dev_ibi_data *dev_ibi;
	u32 ibi_status;

	/*
	 * We have a new IBI. Try to set up its payload retrieval.
	 * When returning true, the IBI data has to be consumed whether
	 * or not we are set up to capture it. If we return true with
	 * ibi->slot == NULL that means the data payload has to be
	 * drained out of the IBI port and dropped.
	 */

	ibi_status = pio_reg_read(IBI_PORT);
	DBG("status = %#x", ibi_status);
	ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
	if (ibi_status & IBI_ERROR) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
		return false;
	}

	ibi->last_seg = ibi_status & IBI_LAST_STATUS;
	ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
	ibi->seg_cnt = ibi->seg_len;

	dev = i3c_hci_addr_to_dev(hci, ibi->addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi->addr);
		return true;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	ibi->max_len = dev_ibi->max_len;

	if (ibi->seg_len > ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi->seg_len, ibi->max_len);
		return true;
	}

	ibi->slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!ibi->slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
	} else {
		ibi->slot->len = 0;
		ibi->data_ptr = ibi->slot->data;
	}
	return true;
}

static void hci_pio_free_ibi_slot(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	struct hci_pio_dev_ibi_data *dev_ibi;

	if (ibi->slot) {
		dev_ibi = ibi->slot->dev->common.master_priv;
		i3c_generic_ibi_recycle_slot(dev_ibi->pool, ibi->slot);
		ibi->slot = NULL;
	}
}

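/*
 * IBI state machine, called from the interrupt handler whenever the IBI
 * status threshold is reached. It may return with a partially received
 * IBI in pio->ibi and resume where it left off on the next invocation;
 * payload data arriving without a slot to store it is drained and
 * dropped.
 */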
static bool hci_pio_process_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;

	if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg)
		if (!hci_pio_prep_new_ibi(hci, pio))
			return false;

	for (;;) {
		u32 ibi_status;
		unsigned int ibi_addr;

		if (ibi->slot) {
			if (!hci_pio_get_ibi_segment(hci, pio))
				return false;
			ibi->slot->len += ibi->seg_len;
			ibi->data_ptr += ibi->seg_len;
			if (ibi->last_seg) {
				/* was the last segment: submit it and leave */
				i3c_master_queue_ibi(ibi->slot->dev, ibi->slot);
				ibi->slot = NULL;
				hci_pio_set_ibi_thresh(hci, pio, 1);
				return true;
			}
		} else if (ibi->seg_cnt) {
			/*
			 * No slot to store this payload into: drain it out
			 * of the IBI port and drop it. This is an error
			 * path, so efficiency doesn't matter here.
			 */
			hci_pio_set_ibi_thresh(hci, pio, 1);
			do {
				if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
					return false;
				pio_reg_read(IBI_PORT);
			} while (--ibi->seg_cnt);
			if (ibi->last_seg)
				return true;
		}

		/* loop to process the next segment */
		hci_pio_set_ibi_thresh(hci, pio, 1);
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		ibi_status = pio_reg_read(IBI_PORT);
		ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		if (ibi->addr != ibi_addr) {
			/* this should never happen */
			dev_err(&hci->master.dev,
				"unexpected IBI address change from %d to %d\n",
				ibi->addr, ibi_addr);
			hci_pio_free_ibi_slot(hci, pio);
		}
		ibi->last_seg = ibi_status & IBI_LAST_STATUS;
		ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
		ibi->seg_cnt = ibi->seg_len;
		if (ibi->slot && ibi->slot->len + ibi->seg_len > ibi->max_len) {
			dev_err(&hci->master.dev,
				"IBI payload too big (%d > %d)\n",
				ibi->slot->len + ibi->seg_len, ibi->max_len);
			hci_pio_free_ibi_slot(hci, pio);
		}
	}

	return false;
}

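/*
 * Allocate the generic IBI slot pool for a device according to the
 * requested payload size and number of slots.
 */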
static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct i3c_generic_ibi_pool *pool;
	struct hci_pio_dev_ibi_data *dev_ibi;

	dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
	if (!dev_ibi)
		return -ENOMEM;
	pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(pool)) {
		kfree(dev_ibi);
		return PTR_ERR(pool);
	}
	dev_ibi->pool = pool;
	dev_ibi->max_len = req->max_payload_len;
	dev_data->ibi_data = dev_ibi;
	return 0;
}

static void hci_pio_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	dev_data->ibi_data = NULL;
	i3c_generic_ibi_free_pool(dev_ibi->pool);
	kfree(dev_ibi);
}

static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}

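/*
 * Main PIO interrupt service routine: dispatch each relevant status bit
 * to its handler, trimming enabled interrupts as queues drain, and report
 * whether anything was actually handled.
 */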
static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
{
	struct hci_pio_data *pio = hci->io_data;
	u32 status;

	spin_lock(&pio->lock);
	status = pio_reg_read(INTR_STATUS);
	DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
	status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
	if (!status) {
		spin_unlock(&pio->lock);
		return false;
	}

	if (status & STAT_IBI_STATUS_THLD)
		hci_pio_process_ibi(hci, pio);

	if (status & STAT_RX_THLD)
		if (hci_pio_process_rx(hci, pio))
			pio->enabled_irqs &= ~STAT_RX_THLD;
	if (status & STAT_TX_THLD)
		if (hci_pio_process_tx(hci, pio))
			pio->enabled_irqs &= ~STAT_TX_THLD;
	if (status & STAT_RESP_READY)
		if (hci_pio_process_resp(hci, pio))
			pio->enabled_irqs &= ~STAT_RESP_READY;

	if (unlikely(status & STAT_LATENCY_WARNINGS)) {
		pio_reg_write(INTR_STATUS, status & STAT_LATENCY_WARNINGS);
		dev_warn_ratelimited(&hci->master.dev,
				     "encountered warning condition %#lx\n",
				     status & STAT_LATENCY_WARNINGS);
	}

	if (unlikely(status & STAT_ALL_ERRORS)) {
		pio_reg_write(INTR_STATUS, status & STAT_ALL_ERRORS);
		hci_pio_err(hci, pio, status & STAT_ALL_ERRORS);
	}

	if (status & STAT_CMD_QUEUE_READY)
		if (hci_pio_process_cmd(hci, pio))
			pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;

	pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
	DBG("(out) status: %#x/%#x",
	    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
	spin_unlock(&pio->lock);
	return true;
}

const struct hci_io_ops mipi_i3c_hci_pio = {
	.init			= hci_pio_init,
	.cleanup		= hci_pio_cleanup,
	.queue_xfer		= hci_pio_queue_xfer,
	.dequeue_xfer		= hci_pio_dequeue_xfer,
	.irq_handler		= hci_pio_irq_handler,
	.request_ibi		= hci_pio_request_ibi,
	.free_ibi		= hci_pio_free_ibi,
	.recycle_ibi_slot	= hci_pio_recycle_ibi_slot,
};