0001
0002
0003
0004
0005
0006 #include <soc/tegra/ivc.h>
0007
0008 #define TEGRA_IVC_ALIGN 64
0009
0010
0011
0012
0013
0014
/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx.channel->tx.state to indicate its synchronization
 * state; tegra_ivc_notified() drives the transitions.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx.channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint
	 * is allowed to clear the counters it owns asynchronously with
	 * respect to the current endpoint. Therefore, the current endpoint
	 * is no longer allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the counters and transition to the ack state.
	 * If the remote endpoint observes us in the ack state, it can return
	 * to the established state once it has cleared its counters.
	 */
	TEGRA_IVC_STATE_ACK
};
0045
0046
0047
0048
0049
0050
0051
0052
/*
 * This structure is divided into two cache-line aligned parts: the first is
 * only written through the tx.channel pointer, while the second is only
 * written through the rx.channel pointer. This delineates ownership of the
 * cache lines, which is critical to performance and necessary in
 * non-cache-coherent implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};
0070
0071 static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
0072 {
0073 if (!ivc->peer)
0074 return;
0075
0076 dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
0077 DMA_FROM_DEVICE);
0078 }
0079
0080 static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
0081 {
0082 if (!ivc->peer)
0083 return;
0084
0085 dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
0086 DMA_TO_DEVICE);
0087 }
0088
static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial-of-service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't technically empty in this case, it is
	 * an invalid state caused by a potentially malicious peer, so
	 * reporting empty is safer, because it gives the impression that the
	 * channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}
0115
0116 static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
0117 struct tegra_ivc_header *header)
0118 {
0119 u32 tx = READ_ONCE(header->tx.count);
0120 u32 rx = READ_ONCE(header->rx.count);
0121
0122
0123
0124
0125
0126 return tx - rx >= ivc->num_frames;
0127 }
0128
0129 static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
0130 struct tegra_ivc_header *header)
0131 {
0132 u32 tx = READ_ONCE(header->tx.count);
0133 u32 rx = READ_ONCE(header->rx.count);
0134
0135
0136
0137
0138
0139
0140
0141 return tx - rx;
0142 }
0143
0144 static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
0145 {
0146 WRITE_ONCE(ivc->tx.channel->tx.count,
0147 READ_ONCE(ivc->tx.channel->tx.count) + 1);
0148
0149 if (ivc->tx.position == ivc->num_frames - 1)
0150 ivc->tx.position = 0;
0151 else
0152 ivc->tx.position++;
0153 }
0154
0155 static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
0156 {
0157 WRITE_ONCE(ivc->rx.channel->rx.count,
0158 READ_ONCE(ivc->rx.channel->rx.count) + 1);
0159
0160 if (ivc->rx.position == ivc->num_frames - 1)
0161 ivc->rx.position = 0;
0162 else
0163 ivc->rx.position++;
0164 }
0165
/* Check whether the local receive queue has at least one frame to read. */
static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->tx.state is set locally, so it is not synchronized
	 * with state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->tx.state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	/* re-read the peer's tx.count from shared memory and check again */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}
0198
/* Check whether the local transmit queue has room for another frame. */
static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	/* local state only; no synchronization with the peer is required */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/* fast path: skip cache maintenance while the queue appears non-full */
	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	/* re-read the peer's rx.count from shared memory and check again */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}
0216
0217 static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
0218 struct tegra_ivc_header *header,
0219 unsigned int frame)
0220 {
0221 if (WARN_ON(frame >= ivc->num_frames))
0222 return ERR_PTR(-EINVAL);
0223
0224 return (void *)(header + 1) + ivc->frame_size * frame;
0225 }
0226
0227 static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
0228 dma_addr_t phys,
0229 unsigned int frame)
0230 {
0231 unsigned long offset;
0232
0233 offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
0234
0235 return phys + offset;
0236 }
0237
0238 static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
0239 dma_addr_t phys,
0240 unsigned int frame,
0241 unsigned int offset,
0242 size_t size)
0243 {
0244 if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
0245 return;
0246
0247 phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
0248
0249 dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
0250 }
0251
0252 static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
0253 dma_addr_t phys,
0254 unsigned int frame,
0255 unsigned int offset,
0256 size_t size)
0257 {
0258 if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
0259 return;
0260
0261 phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
0262
0263 dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
0264 }
0265
0266
/*
 * Peek directly at the next received frame without consuming it. Returns a
 * pointer into the rx queue, or an ERR_PTR() on error (no data, channel not
 * established).
 */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before the data is read.
	 */
	smp_rmb();

	/* pull the peer's frame contents out of shared memory */
	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
0290
/* Consume the current rx frame and notify the peer if it was blocked. */
int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected
	 * to have already observed the channel non-empty. This check is
	 * just to catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	/* publish the updated rx.count to the peer */
	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
0329
0330
0331 void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
0332 {
0333 int err;
0334
0335 err = tegra_ivc_check_write(ivc);
0336 if (err < 0)
0337 return ERR_PTR(err);
0338
0339 return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
0340 }
0341 EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
0342
0343
/* Commit the current tx frame and notify the peer if it was idle. */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	/* catch programming errors; caller should know the queue has room */
	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	/* push the frame contents out to shared memory */
	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before the update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
0385
0386 void tegra_ivc_reset(struct tegra_ivc *ivc)
0387 {
0388 unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
0389
0390 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
0391 tegra_ivc_flush(ivc, ivc->tx.phys + offset);
0392 ivc->notify(ivc, ivc->notify_data);
0393 }
0394 EXPORT_SYMBOL(tegra_ivc_reset);
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
/*
 * Advance the reset handshake in response to a notification from the peer.
 *
 * State transition table (local = our tx.state, remote = peer's tx.state as
 * seen through our rx channel):
 *
 *	local	remote		action
 *	-----	------		-------------------------------------
 *	any	SYNC		clear counters; move to ACK; notify
 *	SYNC	ACK		clear counters; move to ESTABLISHED; notify
 *	ACK	ACK/EST		move to ESTABLISHED; notify
 *	other	other		no action
 *
 * Returns 0 once the channel is established, -EAGAIN while the handshake is
 * still in progress.
 */
int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = READ_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset the channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before the new state
		 * can be observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so
		 * it is now safe for the remote end to start using them.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify the remote end to observe the state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of the peer's ACK before stores clearing
		 * tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset the channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before the new state
		 * can be observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. The peer's ACK tells us it has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify the remote end to observe the state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point we have observed the peer past its SYNC
		 * state. Order that observation before our store to
		 * tx.channel below.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We previously cleared our
		 * counters when entering ACK, and the peer has moved on from
		 * SYNC, so it is safe to start writing/reading on this
		 * channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify the remote end to observe the state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * Nothing to do: either the channel is already fully
		 * established, or we are still waiting for the remote end to
		 * catch up with our current state in one of the branches
		 * above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
0542
0543 size_t tegra_ivc_align(size_t size)
0544 {
0545 return ALIGN(size, TEGRA_IVC_ALIGN);
0546 }
0547 EXPORT_SYMBOL(tegra_ivc_align);
0548
0549 unsigned tegra_ivc_total_queue_size(unsigned queue_size)
0550 {
0551 if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
0552 pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
0553 __func__, queue_size, TEGRA_IVC_ALIGN);
0554 return 0;
0555 }
0556
0557 return queue_size + sizeof(struct tegra_ivc_header);
0558 }
0559 EXPORT_SYMBOL(tegra_ivc_total_queue_size);
0560
/*
 * Validate the queue geometry passed to tegra_ivc_init(): counter/header
 * alignment, counter range, frame and base alignment, and non-overlap of
 * the two queue regions. Returns 0 on success or -EINVAL.
 */
static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	/* each counter must sit at the start of its own cache line */
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	/* the total queue byte count must fit in the 32-bit frame counters */
	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must be aligned so that the shared counters can be
	 * accessed without crossing cache lines.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	/* the rx and tx queue regions must not overlap */
	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}
0611
/*
 * Initialize an IVC channel.
 *
 * @ivc: channel context to fill in
 * @peer: device to DMA-map the queues for; when NULL, rx_phys/tx_phys are
 *        used as the bus addresses directly
 * @rx, @rx_phys: receive queue CPU address and (if !peer) bus address
 * @tx, @tx_phys: transmit queue CPU address and (if !peer) bus address
 * @num_frames, @frame_size: queue geometry (validated below)
 * @notify, @data: callback (and its argument) invoked to kick the peer
 *
 * Returns 0 on success or a negative errno. The channel still needs
 * tegra_ivc_reset() before the positions are meaningful.
 */
int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int value.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		/* map both queues for DMA; unwind the rx map if tx fails */
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		/* no peer device: the caller supplies the bus addresses */
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
0674
0675 void tegra_ivc_cleanup(struct tegra_ivc *ivc)
0676 {
0677 if (ivc->peer) {
0678 size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
0679 ivc->frame_size);
0680
0681 dma_unmap_single(ivc->peer, ivc->rx.phys, size,
0682 DMA_BIDIRECTIONAL);
0683 dma_unmap_single(ivc->peer, ivc->tx.phys, size,
0684 DMA_BIDIRECTIONAL);
0685 }
0686 }
0687 EXPORT_SYMBOL(tegra_ivc_cleanup);