// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Ring-based DMA operation for the MIPI I3C HCI.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"

/*
 * Software Parameters
 */

#define XFER_RINGS			1	/* max: 8 */
#define XFER_RING_ENTRIES		16	/* max: 255 */

#define IBI_RINGS			1	/* max: 8 */
#define IBI_STATUS_RING_ENTRIES		32	/* max: 255 */
#define IBI_CHUNK_CACHELINES		1	/* max: 256 bytes equivalent */
#define IBI_CHUNK_POOL_SIZE		128	/* max: 1023 */

/*
 * Ring Header Preamble
 */

#define rhs_reg_read(r)		readl(hci->RHS_regs + (RHS_##r))
#define rhs_reg_write(r, v)	writel(v, hci->RHS_regs + (RHS_##r))

#define RHS_CONTROL			0x00
#define PREAMBLE_SIZE			GENMASK(31, 24)	/* Preamble Section Size */
#define HEADER_SIZE			GENMASK(23, 16)	/* Ring Header Size */
#define MAX_HEADER_COUNT_CAP		GENMASK(7, 4)	/* HC Max Header Count */
#define MAX_HEADER_COUNT		GENMASK(3, 0)	/* Driver Max Header Count */

#define RHS_RHn_OFFSET(n)		(0x04 + (n)*4)

/*
 * Ring Header (Per-Ring Bundle)
 */

#define rh_reg_read(r)		readl(rh->regs + (RH_##r))
#define rh_reg_write(r, v)	writel(v, rh->regs + (RH_##r))

#define RH_CR_SETUP			0x00	/* Command/Response Ring */
#define CR_XFER_STRUCT_SIZE		GENMASK(31, 24)
#define CR_RESP_STRUCT_SIZE		GENMASK(23, 16)
#define CR_RING_SIZE			GENMASK(8, 0)

#define RH_IBI_SETUP			0x04
#define IBI_STATUS_STRUCT_SIZE		GENMASK(31, 24)
#define IBI_STATUS_RING_SIZE		GENMASK(23, 16)
#define IBI_DATA_CHUNK_SIZE		GENMASK(12, 10)
#define IBI_DATA_CHUNK_COUNT		GENMASK(9, 0)

#define RH_CHUNK_CONTROL		0x08

#define RH_INTR_STATUS			0x10
#define RH_INTR_STATUS_ENABLE		0x14
#define RH_INTR_SIGNAL_ENABLE		0x18
#define RH_INTR_FORCE			0x1c
#define INTR_IBI_READY			BIT(12)
#define INTR_TRANSFER_COMPLETION	BIT(11)
#define INTR_RING_OP			BIT(10)
#define INTR_TRANSFER_ERR		BIT(9)
#define INTR_WARN_INS_STOP_MODE		BIT(7)
#define INTR_IBI_RING_FULL		BIT(6)
#define INTR_TRANSFER_ABORT		BIT(5)

#define RH_RING_STATUS			0x20
#define RING_STATUS_LOCKED		BIT(3)
#define RING_STATUS_ABORTED		BIT(2)
#define RING_STATUS_RUNNING		BIT(1)
#define RING_STATUS_ENABLED		BIT(0)

#define RH_RING_CONTROL			0x24
#define RING_CTRL_ABORT			BIT(2)
#define RING_CTRL_RUN_STOP		BIT(1)
#define RING_CTRL_ENABLE		BIT(0)

#define RH_RING_OPERATION1		0x28
#define RING_OP1_IBI_DEQ_PTR		GENMASK(23, 16)
#define RING_OP1_CR_SW_DEQ_PTR		GENMASK(15, 8)
#define RING_OP1_CR_ENQ_PTR		GENMASK(7, 0)

#define RH_RING_OPERATION2		0x2c
#define RING_OP2_IBI_ENQ_PTR		GENMASK(23, 16)
#define RING_OP2_CR_DEQ_PTR		GENMASK(7, 0)

#define RH_CMD_RING_BASE_LO		0x30
#define RH_CMD_RING_BASE_HI		0x34
#define RH_RESP_RING_BASE_LO		0x38
#define RH_RESP_RING_BASE_HI		0x3c
#define RH_IBI_STATUS_RING_BASE_LO	0x40
#define RH_IBI_STATUS_RING_BASE_HI	0x44
#define RH_IBI_DATA_RING_BASE_LO	0x48
#define RH_IBI_DATA_RING_BASE_HI	0x4c

#define RH_CMD_RING_SG			0x50	/* Ring Scatter Gather Support */
#define RH_RESP_RING_SG			0x54
#define RH_IBI_STATUS_RING_SG		0x58
#define RH_IBI_DATA_RING_SG		0x5c
#define RING_SG_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define RING_SG_LIST_SIZE		GENMASK(15, 0)

/*
 * Data Buffer Descriptor (in memory)
 */

#define DATA_BUF_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define DATA_BUF_IOC			BIT(30)	/* Interrupt On Completion */
#define DATA_BUF_BLOCK_SIZE		GENMASK(15, 0)

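/*
 * Per-ring bookkeeping. xfer/resp and ibi_status live in coherent DMA
 * memory and shadow the hardware rings; ibi_data is a streaming mapping
 * backing the IBI data chunks. done_ptr is the software dequeue pointer
 * for completed responses, ibi_chunk_ptr tracks the next expected IBI
 * data chunk, and src_xfers maps ring entries back to their originating
 * struct hci_xfer.
 */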
struct hci_rh_data {
	void __iomem *regs;
	void *xfer, *resp, *ibi_status, *ibi_data;
	dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
	unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
	unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
	unsigned int done_ptr, ibi_chunk_ptr;
	struct hci_xfer **src_xfers;
	spinlock_t lock;
	struct completion op_done;
};

struct hci_rings_data {
	unsigned int total;
	struct hci_rh_data headers[];
};

struct hci_dma_dev_ibi_data {
	struct i3c_generic_ibi_pool *pool;
	unsigned int max_len;
};

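/*
 * The ring base registers come in LO/HI pairs. These helpers produce the
 * two 32-bit halves of a dma_addr_t regardless of whether dma_addr_t is
 * 32 or 64 bits wide on the build target.
 */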
static inline u32 lo32(dma_addr_t physaddr)
{
	return physaddr;
}

static inline u32 hi32(dma_addr_t physaddr)
{
	/* trickery to avoid compiler warnings on 32-bit build targets */
	if (sizeof(dma_addr_t) > 4) {
		u64 hi = physaddr;
		return hi >> 32;
	}
	return 0;
}

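/*
 * hci_dma_cleanup() disables every ring and clears its CR/IBI setup and
 * interrupt enables before releasing the DMA memory, so the hardware can
 * no longer touch the buffers being freed. It also serves as the
 * error-unwind path for hci_dma_init(), which is why every resource is
 * checked before being released.
 */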
static void hci_dma_cleanup(struct i3c_hci *hci)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i;

	if (!rings)
		return;

	for (i = 0; i < rings->total; i++) {
		rh = &rings->headers[i];

		rh_reg_write(RING_CONTROL, 0);
		rh_reg_write(CR_SETUP, 0);
		rh_reg_write(IBI_SETUP, 0);
		rh_reg_write(INTR_SIGNAL_ENABLE, 0);

		if (rh->xfer)
			dma_free_coherent(&hci->master.dev,
					  rh->xfer_struct_sz * rh->xfer_entries,
					  rh->xfer, rh->xfer_dma);
		if (rh->resp)
			dma_free_coherent(&hci->master.dev,
					  rh->resp_struct_sz * rh->xfer_entries,
					  rh->resp, rh->resp_dma);
		kfree(rh->src_xfers);
		if (rh->ibi_status)
			dma_free_coherent(&hci->master.dev,
					  rh->ibi_status_sz * rh->ibi_status_entries,
					  rh->ibi_status, rh->ibi_status_dma);
		if (rh->ibi_data_dma)
			dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
					 rh->ibi_chunk_sz * rh->ibi_chunks_total,
					 DMA_FROM_DEVICE);
		kfree(rh->ibi_data);
	}

	rhs_reg_write(CONTROL, 0);

	kfree(rings);
	hci->io_data = NULL;
}

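/*
 * hci_dma_init() discovers how many rings the controller implements
 * (MAX_HEADER_COUNT_CAP), clamps that to the driver's XFER_RINGS limit,
 * then for each ring: allocates the command and response rings in
 * coherent memory, programs their base addresses and size, enables
 * interrupts, and (on the first IBI_RINGS rings) sets up the IBI status
 * ring and data chunk pool. Any failure unwinds through
 * hci_dma_cleanup().
 */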
static int hci_dma_init(struct i3c_hci *hci)
{
	struct hci_rings_data *rings;
	struct hci_rh_data *rh;
	u32 regval;
	unsigned int i, nr_rings, xfers_sz, resps_sz;
	unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
	int ret;

	regval = rhs_reg_read(CONTROL);
	nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
	dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
	if (unlikely(nr_rings > 8)) {
		dev_err(&hci->master.dev, "number of rings should be <= 8\n");
		nr_rings = 8;
	}
	if (nr_rings > XFER_RINGS)
		nr_rings = XFER_RINGS;
	rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;
	hci->io_data = rings;
	rings->total = nr_rings;

	for (i = 0; i < rings->total; i++) {
		u32 offset = rhs_reg_read(RHn_OFFSET(i));

		dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
		ret = -EINVAL;
		if (!offset)
			goto err_out;
		rh = &rings->headers[i];
		rh->regs = hci->base_regs + offset;
		spin_lock_init(&rh->lock);
		init_completion(&rh->op_done);

		rh->xfer_entries = XFER_RING_ENTRIES;

		regval = rh_reg_read(CR_SETUP);
		rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
		rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
		DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
		    rh->xfer_struct_sz, rh->resp_struct_sz);
		xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
		resps_sz = rh->resp_struct_sz * rh->xfer_entries;

		rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
					      &rh->xfer_dma, GFP_KERNEL);
		rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
					      &rh->resp_dma, GFP_KERNEL);
		rh->src_xfers =
			kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
				      GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->xfer || !rh->resp || !rh->src_xfers)
			goto err_out;

		rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
		rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
		rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
		rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));

		regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
		rh_reg_write(CR_SETUP, regval);

		rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
		rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
						 INTR_TRANSFER_COMPLETION |
						 INTR_RING_OP |
						 INTR_TRANSFER_ERR |
						 INTR_WARN_INS_STOP_MODE |
						 INTR_IBI_RING_FULL |
						 INTR_TRANSFER_ABORT);

		/* IBIs are handled on the first IBI_RINGS rings only */
		if (i >= IBI_RINGS)
			goto ring_ready;

		regval = rh_reg_read(IBI_SETUP);
		rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
		rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
		rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;

		rh->ibi_chunk_sz = dma_get_cache_alignment();
		rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
		BUG_ON(rh->ibi_chunk_sz > 256);

		ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
		ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;

		rh->ibi_status =
			dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
					   &rh->ibi_status_dma, GFP_KERNEL);
		rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->ibi_status || !rh->ibi_data)
			goto err_out;
		rh->ibi_data_dma =
			dma_map_single(&hci->master.dev, rh->ibi_data,
				       ibi_data_ring_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
			rh->ibi_data_dma = 0;
			ret = -ENOMEM;
			goto err_out;
		}

		/* IBI_DATA_CHUNK_SIZE encodes log2(chunk size in bytes) - 2 */
		regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
				    rh->ibi_status_entries) |
			 FIELD_PREP(IBI_DATA_CHUNK_SIZE,
				    ilog2(rh->ibi_chunk_sz) - 2) |
			 FIELD_PREP(IBI_DATA_CHUNK_COUNT,
				    rh->ibi_chunks_total);
		rh_reg_write(IBI_SETUP, regval);

		regval = rh_reg_read(INTR_SIGNAL_ENABLE);
		regval |= INTR_IBI_READY;
		rh_reg_write(INTR_SIGNAL_ENABLE, regval);

ring_ready:
		rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
	}

	regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
	rhs_reg_write(CONTROL, regval);
	return 0;

err_out:
	hci_dma_cleanup(hci);
	return ret;
}

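/*
 * Unmap the data buffers of the first n transfers in xfer_list. Used on
 * the completion path and to undo mappings when queueing fails partway.
 */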
static void hci_dma_unmap_xfer(struct i3c_hci *hci,
			       struct hci_xfer *xfer_list, unsigned int n)
{
	struct hci_xfer *xfer;
	unsigned int i;

	for (i = 0; i < n; i++) {
		xfer = xfer_list + i;
		dma_unmap_single(&hci->master.dev,
				 xfer->data_dma, xfer->data_len,
				 xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

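/*
 * Enqueue transfers on the command ring. Each ring entry is a command
 * descriptor (2 or 4 words depending on the command format in use)
 * immediately followed by a Data Buffer Descriptor:
 *
 *	cmd_desc[0..1]		(plus cmd_desc[2..3] with mipi_i3c_hci_cmd_v2)
 *	DATA_BUF_BLOCK_SIZE | flags
 *	data buffer address LO
 *	data buffer address HI
 *
 * DATA_BUF_IOC is set on the last entry of a batch so that a single
 * completion interrupt covers the whole list.
 */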
static int hci_dma_queue_xfer(struct i3c_hci *hci,
			      struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i, ring, enqueue_ptr;
	u32 op1_val, op2_val;

	/* For now we only use ring 0 */
	ring = 0;
	rh = &rings->headers[ring];

	op1_val = rh_reg_read(RING_OPERATION1);
	enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;

		/* store cmd descriptor */
		*ring_data++ = xfer->cmd_desc[0];
		*ring_data++ = xfer->cmd_desc[1];
		if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
			*ring_data++ = xfer->cmd_desc[2];
			*ring_data++ = xfer->cmd_desc[3];
		}

		/* first word of Data Buffer Descriptor Structure */
		if (!xfer->data)
			xfer->data_len = 0;
		*ring_data++ =
			FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
			((i == n - 1) ? DATA_BUF_IOC : 0);

		/* 2nd and 3rd words of Data Buffer Descriptor Structure */
		if (xfer->data) {
			xfer->data_dma =
				dma_map_single(&hci->master.dev,
					       xfer->data,
					       xfer->data_len,
					       xfer->rnw ?
						  DMA_FROM_DEVICE :
						  DMA_TO_DEVICE);
			if (dma_mapping_error(&hci->master.dev,
					      xfer->data_dma)) {
				hci_dma_unmap_xfer(hci, xfer_list, i);
				return -ENOMEM;
			}
			*ring_data++ = lo32(xfer->data_dma);
			*ring_data++ = hi32(xfer->data_dma);
		} else {
			*ring_data++ = 0;
			*ring_data++ = 0;
		}

		/* remember the originating xfer for this ring entry */
		rh->src_xfers[enqueue_ptr] = xfer;
		/* remember the ring entry used by this xfer */
		xfer->ring_number = ring;
		xfer->ring_entry = enqueue_ptr;

		enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;

		/*
		 * We may update the hardware view of the enqueue pointer
		 * only if we didn't reach its dequeue pointer.
		 */
		op2_val = rh_reg_read(RING_OPERATION2);
		if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
			/* the ring is full */
			hci_dma_unmap_xfer(hci, xfer_list, i + 1);
			return -EBUSY;
		}
	}

	/* take care to update the hardware enqueue pointer atomically */
	spin_lock_irq(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_ENQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock_irq(&rh->lock);

	return 0;
}

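/*
 * Cancel transfers that are still queued. The ring must be stopped
 * first: RING_CTRL_ABORT is acknowledged by an INTR_RING_OP interrupt
 * signaling rh->op_done. Entries that have not completed yet are
 * overwritten with a no-op command descriptor so the hardware skips
 * them when the ring is re-enabled.
 */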
static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
				 struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
	unsigned int i;
	bool did_unqueue = false;

	/* stop the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
	if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
		/*
		 * We're deep in it if ever this condition is met.
		 * The hardware might still be writing to memory.
		 * Better stop the world than risk silent corruption.
		 */
		dev_crit(&hci->master.dev, "unable to abort the ring\n");
		BUG();
	}

	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		int idx = xfer->ring_entry;

		/*
		 * At the time the abort happened, the xfer might have
		 * completed already. If not then replace corresponding
		 * descriptor entries with a no-op.
		 */
		if (idx >= 0) {
			u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;

			/* store no-op cmd descriptor */
			*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
			*ring_data++ = 0;
			if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
				*ring_data++ = 0;
				*ring_data++ = 0;
			}

			/* disassociate this xfer struct */
			rh->src_xfers[idx] = NULL;

			/* and unmap its data buffer */
			hci_dma_unmap_xfer(hci, xfer, 1);

			did_unqueue = true;
		}
	}

	/* restart the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);

	return did_unqueue;
}

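/*
 * Process completed responses on a ring. The hardware advances its CR
 * dequeue pointer as it retires entries; everything between our software
 * done_ptr and that pointer carries a response descriptor whose TID is
 * matched against the originating transfer before its completion is
 * signaled.
 */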
static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	u32 op1_val, op2_val, resp, *ring_resp;
	unsigned int tid, done_ptr = rh->done_ptr;
	struct hci_xfer *xfer;

	for (;;) {
		op2_val = rh_reg_read(RING_OPERATION2);
		if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
			break;

		ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
		resp = *ring_resp;
		tid = RESP_TID(resp);
		DBG("resp = 0x%08x", resp);

		xfer = rh->src_xfers[done_ptr];
		if (!xfer) {
			DBG("orphaned ring entry");
		} else {
			hci_dma_unmap_xfer(hci, xfer, 1);
			xfer->ring_entry = -1;
			xfer->response = resp;
			if (tid != xfer->cmd_tid) {
				dev_err(&hci->master.dev,
					"response tid=%d when expecting %d\n",
					tid, xfer->cmd_tid);
				/* TODO: do something about it? */
			}
			if (xfer->completion)
				complete(xfer->completion);
		}

		done_ptr = (done_ptr + 1) % rh->xfer_entries;
		rh->done_ptr = done_ptr;
	}

	/* take care to update the software dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);
}

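/*
 * IBI handling: each device gets a pool of preallocated payload slots
 * from the generic IBI infrastructure, sized by the requested
 * max_payload_len. hci_dma_process_ibi() copies IBI data into a slot
 * before queueing it to the core.
 */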
static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct i3c_generic_ibi_pool *pool;
	struct hci_dma_dev_ibi_data *dev_ibi;

	dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
	if (!dev_ibi)
		return -ENOMEM;
	pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(pool)) {
		kfree(dev_ibi);
		return PTR_ERR(pool);
	}
	dev_ibi->pool = pool;
	dev_ibi->max_len = req->max_payload_len;
	dev_data->ibi_data = dev_ibi;
	return 0;
}

static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	dev_data->ibi_data = NULL;
	i3c_generic_ibi_free_pool(dev_ibi->pool);
	kfree(dev_ibi);
}

static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}

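/*
 * An IBI transaction is reported as one or more entries in the IBI
 * status ring, each covering a number of fixed-size chunks in the IBI
 * data ring; the entry flagged IBI_LAST_STATUS carries the residual byte
 * count. The loop below gathers status entries until it sees that flag,
 * validates the whole sequence, then copies the chunks (handling the
 * data ring wrap-around) into a free slot for the target device.
 */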
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_dma_dev_ibi_data *dev_ibi;
	struct i3c_ibi_slot *slot;
	u32 op1_val, op2_val, ibi_status_error;
	unsigned int ptr, enq_ptr, deq_ptr;
	unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
	int ibi_addr, last_ptr;
	void *ring_ibi_data;
	dma_addr_t ring_ibi_data_dma;

	op1_val = rh_reg_read(RING_OPERATION1);
	deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);

	op2_val = rh_reg_read(RING_OPERATION2);
	enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);

	ibi_status_error = 0;
	ibi_addr = -1;
	ibi_chunks = 0;
	ibi_size = 0;
	last_ptr = -1;

	/* let's find all we can */
	for (ptr = deq_ptr; ptr != enq_ptr;
	     ptr = (ptr + 1) % rh->ibi_status_entries) {
		u32 ibi_status, *ring_ibi_status;
		unsigned int chunks;

		ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
		ibi_status = *ring_ibi_status;
		DBG("status = %#x", ibi_status);

		if (ibi_status_error) {
			/* moving past IBI with error */
		} else if (ibi_status & IBI_ERROR) {
			ibi_status_error = ibi_status;
		} else if (ibi_addr == -1) {
			ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		} else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
			/* the address changed unexpectedly */
			ibi_status_error = ibi_status;
		}

		chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
		ibi_chunks += chunks;
		if (!(ibi_status & IBI_LAST_STATUS)) {
			ibi_size += chunks * rh->ibi_chunk_sz;
		} else {
			ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
			last_ptr = ptr;
			break;
		}
	}

	/* validate what we've got */
	if (last_ptr == -1) {
		/* this IBI sequence is not yet complete */
		DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
		return;
	}
	deq_ptr = last_ptr + 1;
	deq_ptr %= rh->ibi_status_entries;

	if (ibi_status_error) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
		goto done;
	}

	/* determine who this is for */
	dev = i3c_hci_addr_to_dev(hci, ibi_addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi_addr);
		goto done;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	if (ibi_size > dev_ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi_size, dev_ibi->max_len);
		goto done;
	}

	/*
	 * This ring model is not suitable for zero-copy processing of IBIs.
	 * We have the data chunk ring wrap-around to deal with, meaning
	 * that the payload might span multiple chunks beginning at the
	 * end of the ring and wrapping to its start. We also have to keep
	 * returning chunks to the hardware as quickly as possible or the
	 * ring may fill up and IBIs get dropped. So let's just copy the
	 * payload into a preallocated slot for now.
	 */
	slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
		goto done;
	}

	/* copy first part of the payload */
	ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
	ring_ibi_data = rh->ibi_data + ibi_data_offset;
	ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
	first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
			* rh->ibi_chunk_sz;
	if (first_part > ibi_size)
		first_part = ibi_size;
	dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
				first_part, DMA_FROM_DEVICE);
	memcpy(slot->data, ring_ibi_data, first_part);

	/* copy second part if any */
	if (ibi_size > first_part) {
		/* we wrap back to the start of the IBI data ring */
		ring_ibi_data = rh->ibi_data;
		ring_ibi_data_dma = rh->ibi_data_dma;
		dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
					ibi_size - first_part, DMA_FROM_DEVICE);
		memcpy(slot->data + first_part, ring_ibi_data,
		       ibi_size - first_part);
	}

	/* submit it */
	slot->dev = dev;
	slot->len = ibi_size;
	i3c_master_queue_ibi(dev, slot);

done:
	/* take care to update the IBI dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_IBI_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);

	/* update the chunk pointer */
	rh->ibi_chunk_ptr += ibi_chunks;
	rh->ibi_chunk_ptr %= rh->ibi_chunks_total;

	/* and tell the hardware about the consumed chunks */
	rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}

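/*
 * Ring-level interrupt dispatch. @mask has one bit set per ring with a
 * pending interrupt; the per-ring causes are then read from (and acked
 * in) that ring's INTR_STATUS register.
 */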
static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
{
	struct hci_rings_data *rings = hci->io_data;
	unsigned int i;
	bool handled = false;

	for (i = 0; mask && i < 8; i++) {
		struct hci_rh_data *rh;
		u32 status;

		if (!(mask & BIT(i)))
			continue;
		mask &= ~BIT(i);

		rh = &rings->headers[i];
		status = rh_reg_read(INTR_STATUS);
		DBG("rh%d status: %#x", i, status);
		if (!status)
			continue;
		rh_reg_write(INTR_STATUS, status);

		if (status & INTR_IBI_READY)
			hci_dma_process_ibi(hci, rh);
		if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
			hci_dma_xfer_done(hci, rh);
		if (status & INTR_RING_OP)
			complete(&rh->op_done);

		if (status & INTR_TRANSFER_ABORT)
			dev_notice_ratelimited(&hci->master.dev,
					       "ring %d: Transfer Aborted\n", i);
		if (status & INTR_WARN_INS_STOP_MODE)
			dev_warn_ratelimited(&hci->master.dev,
					     "ring %d: Inserted Stop on Mode Change\n", i);
		if (status & INTR_IBI_RING_FULL)
			dev_err_ratelimited(&hci->master.dev,
					    "ring %d: IBI Ring Full Condition\n", i);

		handled = true;
	}

	return handled;
}

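/*
 * I/O backend operations for DMA (ring) mode, plugged into the common
 * mipi-i3c-hci core alongside the PIO backend.
 */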
const struct hci_io_ops mipi_i3c_hci_dma = {
	.init			= hci_dma_init,
	.cleanup		= hci_dma_cleanup,
	.queue_xfer		= hci_dma_queue_xfer,
	.dequeue_xfer		= hci_dma_dequeue_xfer,
	.irq_handler		= hci_dma_irq_handler,
	.request_ibi		= hci_dma_request_ibi,
	.free_ibi		= hci_dma_free_ibi,
	.recycle_ibi_slot	= hci_dma_recycle_ibi_slot,
};