// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum tegra_ivc_state {
    /*
     * This value is zero for backwards compatibility with services that
     * assume channels to be initially zeroed. Such channels are in an
     * initially valid state, but cannot be asynchronously reset, and must
     * maintain a valid state at all times.
     *
     * The transmitting end can enter the established state from the sync or
     * ack state when it observes the receiving endpoint in the ack or
     * established state, indicating that it has cleared the counters in our
     * rx_channel.
     */
    TEGRA_IVC_STATE_ESTABLISHED = 0,

    /*
     * If an endpoint is observed in the sync state, the remote endpoint is
     * allowed to clear the counters it owns asynchronously with respect to
     * the current endpoint. Therefore, the current endpoint is no longer
     * allowed to communicate.
     */
    TEGRA_IVC_STATE_SYNC,

    /*
     * When the transmitting end observes the receiving end in the sync
     * state, it can clear the tx.count and rx.count it owns and transition
     * to the ack state. If the remote endpoint observes us in the ack
     * state, it can return to the established state once it has cleared
     * its counters.
     */
    TEGRA_IVC_STATE_ACK
};

/*
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache-coherent
 * implementations.
 */
struct tegra_ivc_header {
    union {
        struct {
            /* fields owned by the transmitting end */
            u32 count;
            u32 state;
        };

        u8 pad[TEGRA_IVC_ALIGN];
    } tx;

    union {
        /* fields owned by the receiving end */
        u32 count;
        u8 pad[TEGRA_IVC_ALIGN];
    } rx;
};
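
/*
 * Illustrative sketch (not part of the driver): each queue in shared
 * memory is laid out as one header followed by num_frames frames, so
 * for num_frames = 4 and frame_size = 64:
 *
 *     offset 0:    struct tegra_ivc_header (two 64-byte halves)
 *     offset 128:  frame 0
 *     offset 192:  frame 1
 *     offset 256:  frame 2
 *     offset 320:  frame 3
 *
 * tegra_ivc_frame_phys() and tegra_ivc_frame_virt() below compute
 * addresses within this layout.
 */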

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
    if (!ivc->peer)
        return;

    dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
                            DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
    if (!ivc->peer)
        return;

    dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
                               DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
                                   struct tegra_ivc_header *header)
{
    /*
     * This function performs multiple checks on the same values with
     * security implications, so create snapshots with READ_ONCE() to
     * ensure that these checks use the same values.
     */
    u32 tx = READ_ONCE(header->tx.count);
    u32 rx = READ_ONCE(header->rx.count);

    /*
     * Perform an over-full check to prevent denial of service attacks
     * where a server could be easily fooled into believing that there's
     * an extremely large number of frames ready, since receivers are not
     * expected to check for full or over-full conditions.
     *
     * Although the channel isn't empty, this is an invalid case caused by
     * a potentially malicious peer, so returning empty is safer, because
     * it gives the impression that the channel has gone silent.
     */
    if (tx - rx > ivc->num_frames)
        return true;

    return tx == rx;
}
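
/*
 * Worked example (illustrative): the counters are free-running u32s, so
 * the arithmetic relies on unsigned wrap-around. With num_frames = 16,
 * tx = 0x00000002 and rx = 0xffffffff, the difference tx - rx wraps to
 * 3, correctly reporting three frames in flight. A corrupted pair such
 * as tx = 100, rx = 0 yields 100 > 16 and is treated as empty above.
 */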

static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
                                  struct tegra_ivc_header *header)
{
    u32 tx = READ_ONCE(header->tx.count);
    u32 rx = READ_ONCE(header->rx.count);

    /*
     * Invalid cases where the counters indicate that the queue is over
     * capacity also appear full.
     */
    return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
                                      struct tegra_ivc_header *header)
{
    u32 tx = READ_ONCE(header->tx.count);
    u32 rx = READ_ONCE(header->rx.count);

    /*
     * This function isn't expected to be used in scenarios where an
     * over-full situation can lead to denial of service attacks. See the
     * comment in tegra_ivc_empty() for an explanation about special
     * over-full considerations.
     */
    return tx - rx;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
    WRITE_ONCE(ivc->tx.channel->tx.count,
               READ_ONCE(ivc->tx.channel->tx.count) + 1);

    if (ivc->tx.position == ivc->num_frames - 1)
        ivc->tx.position = 0;
    else
        ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
    WRITE_ONCE(ivc->rx.channel->rx.count,
               READ_ONCE(ivc->rx.channel->rx.count) + 1);

    if (ivc->rx.position == ivc->num_frames - 1)
        ivc->rx.position = 0;
    else
        ivc->rx.position++;
}
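
/*
 * Illustrative note: the shared counters free-run modulo 2^32 while the
 * cached positions wrap at num_frames, so once both ends have completed
 * the reset handshake below, position == count % num_frames holds; for
 * example, with num_frames = 4 a count of 9 corresponds to position 1.
 */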

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
    unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

    /*
     * tx.channel->state is set locally, so it is not synchronized with
     * state from the remote peer. The remote peer cannot reset its
     * transmit counters until we've acknowledged its synchronization
     * request, so no additional synchronization is required because an
     * asynchronous transition of rx.channel->state to
     * TEGRA_IVC_STATE_ACK is not allowed.
     */
    if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
        return -ECONNRESET;

    /*
     * Avoid unnecessary invalidations when performing repeated accesses
     * to an IVC channel by checking the old queue pointers first.
     *
     * Synchronization is only necessary when these pointers indicate
     * empty or full.
     */
    if (!tegra_ivc_empty(ivc, ivc->rx.channel))
        return 0;

    tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

    if (tegra_ivc_empty(ivc, ivc->rx.channel))
        return -ENOSPC;

    return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
    unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

    if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
        return -ECONNRESET;

    if (!tegra_ivc_full(ivc, ivc->tx.channel))
        return 0;

    tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

    if (tegra_ivc_full(ivc, ivc->tx.channel))
        return -ENOSPC;

    return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
                                  struct tegra_ivc_header *header,
                                  unsigned int frame)
{
    if (WARN_ON(frame >= ivc->num_frames))
        return ERR_PTR(-EINVAL);

    return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
                                              dma_addr_t phys,
                                              unsigned int frame)
{
    unsigned long offset;

    offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

    return phys + offset;
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
                                              dma_addr_t phys,
                                              unsigned int frame,
                                              unsigned int offset,
                                              size_t size)
{
    if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
        return;

    phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

    dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
                                         dma_addr_t phys,
                                         unsigned int frame,
                                         unsigned int offset,
                                         size_t size)
{
    if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
        return;

    phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

    dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
    int err;

    if (WARN_ON(ivc == NULL))
        return ERR_PTR(-EINVAL);

    err = tegra_ivc_check_read(ivc);
    if (err < 0)
        return ERR_PTR(err);

    /*
     * Order observation of ivc->rx.position potentially indicating new
     * data before data read.
     */
    smp_rmb();

    tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
                               ivc->frame_size);

    return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
    unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
    unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
    int err;

    /*
     * No read barriers or synchronization here: the caller is expected to
     * have already observed the channel non-empty. This check is just to
     * catch programming errors.
     */
    err = tegra_ivc_check_read(ivc);
    if (err < 0)
        return err;

    tegra_ivc_advance_rx(ivc);

    tegra_ivc_flush(ivc, ivc->rx.phys + rx);

    /*
     * Ensure our write to ivc->rx.position occurs before our read from
     * ivc->tx.position.
     */
    smp_mb();

    /*
     * Notify only upon transition from full to non-full. The available
     * count can only asynchronously increase, so the worst possible
     * side-effect will be a spurious notification.
     */
    tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

    if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
        ivc->notify(ivc, ivc->notify_data);

    return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
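
/*
 * Minimal receive sketch (illustrative, assuming a channel "chan" that
 * has completed the reset handshake; "process()" is a hypothetical
 * consumer):
 *
 *     void *frame;
 *
 *     while (!IS_ERR(frame = tegra_ivc_read_get_next_frame(chan))) {
 *         process(frame, chan->frame_size);
 *         tegra_ivc_read_advance(chan);
 *     }
 *
 * The frame pointer is only valid until tegra_ivc_read_advance() hands
 * the slot back to the transmitter.
 */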

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
    int err;

    err = tegra_ivc_check_write(ivc);
    if (err < 0)
        return ERR_PTR(err);

    return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
    unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
    unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
    int err;

    err = tegra_ivc_check_write(ivc);
    if (err < 0)
        return err;

    tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
                          ivc->frame_size);

    /*
     * Order any possible stores to the frame before update of
     * ivc->tx.position.
     */
    smp_wmb();

    tegra_ivc_advance_tx(ivc);
    tegra_ivc_flush(ivc, ivc->tx.phys + tx);

    /*
     * Ensure our write to ivc->tx.position occurs before our read from
     * ivc->rx.position.
     */
    smp_mb();

    /*
     * Notify only upon transition from empty to non-empty. The available
     * count can only asynchronously decrease, so the worst possible
     * side-effect will be a spurious notification.
     */
    tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

    if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
        ivc->notify(ivc, ivc->notify_data);

    return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
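
/*
 * Minimal transmit sketch (illustrative, assuming an established
 * channel "chan" and a payload "buf" of len <= chan->frame_size):
 *
 *     void *frame = tegra_ivc_write_get_next_frame(chan);
 *
 *     if (IS_ERR(frame))
 *         return PTR_ERR(frame);  // e.g. -ENOSPC when the queue is full
 *
 *     memcpy(frame, buf, len);
 *     return tegra_ivc_write_advance(chan);
 */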

void tegra_ivc_reset(struct tegra_ivc *ivc)
{
    unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

    ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
    tegra_ivc_flush(ivc, ivc->tx.phys + offset);
    ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *  local   remote  action
 *  -----   ------  -----------------------------------
 *  SYNC    EST     <none>
 *  SYNC    ACK     reset counters; move to EST; notify
 *  SYNC    SYNC    reset counters; move to ACK; notify
 *  ACK     EST     move to EST; notify
 *  ACK     ACK     move to EST; notify
 *  ACK     SYNC    reset counters; move to ACK; notify
 *  EST     EST     <none>
 *  EST     ACK     <none>
 *  EST     SYNC    reset counters; move to ACK; notify
 *
 * ===============================================================
 */

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
    unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
    enum tegra_ivc_state state;

    /* Copy the receiver's state out of shared memory. */
    tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
    state = READ_ONCE(ivc->rx.channel->tx.state);

    if (state == TEGRA_IVC_STATE_SYNC) {
        offset = offsetof(struct tegra_ivc_header, tx.count);

        /*
         * Order observation of TEGRA_IVC_STATE_SYNC before stores
         * clearing tx.channel.
         */
        smp_rmb();

        /*
         * Reset tx.channel counters. The remote end is in the SYNC
         * state and won't make progress until we change our state,
         * so the counters are not in use at this time.
         */
        ivc->tx.channel->tx.count = 0;
        ivc->rx.channel->rx.count = 0;

        ivc->tx.position = 0;
        ivc->rx.position = 0;

        /*
         * Ensure that counters appear cleared before new state can be
         * observed.
         */
        smp_wmb();

        /*
         * Move to ACK state. We have just cleared our counters, so it
         * is now safe for the remote end to start using these values.
         */
        ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
        tegra_ivc_flush(ivc, ivc->tx.phys + offset);

        /*
         * Notify remote end to observe state transition.
         */
        ivc->notify(ivc, ivc->notify_data);

    } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
               state == TEGRA_IVC_STATE_ACK) {
        offset = offsetof(struct tegra_ivc_header, tx.count);

        /*
         * Order observation of TEGRA_IVC_STATE_ACK before stores
         * clearing tx.channel.
         */
        smp_rmb();

        /*
         * Reset tx.channel counters. The remote end is in the ACK
         * state and won't make progress until we change our state,
         * so the counters are not in use at this time.
         */
        ivc->tx.channel->tx.count = 0;
        ivc->rx.channel->rx.count = 0;

        ivc->tx.position = 0;
        ivc->rx.position = 0;

        /*
         * Ensure that counters appear cleared before new state can be
         * observed.
         */
        smp_wmb();

        /*
         * Move to ESTABLISHED state. We know that the remote end has
         * already cleared its counters, so it is safe to start
         * writing/reading on this channel.
         */
        ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
        tegra_ivc_flush(ivc, ivc->tx.phys + offset);

        /*
         * Notify remote end to observe state transition.
         */
        ivc->notify(ivc, ivc->notify_data);

    } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
        offset = offsetof(struct tegra_ivc_header, tx.count);

        /*
         * At this point, we have observed the peer to be in either
         * the ACK or ESTABLISHED state. Next, order observation of
         * peer state before storing to tx.channel.
         */
        smp_rmb();

        /*
         * Move to ESTABLISHED state. We know that we have previously
         * cleared our counters, and we know that the remote end has
         * cleared its counters, so it is safe to start writing/reading
         * on this channel.
         */
        ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
        tegra_ivc_flush(ivc, ivc->tx.phys + offset);

        /*
         * Notify remote end to observe state transition.
         */
        ivc->notify(ivc, ivc->notify_data);

    } else {
        /*
         * There is no need to handle any further action. Either the
         * channel is already fully established, or we are waiting for
         * the remote end to catch up with our current state. Refer
         * to the diagram in "IVC State Transition Table" above.
         */
    }

    if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
        return -EAGAIN;

    return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
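
/*
 * Establishment sketch (illustrative, assuming the notify callback
 * kicks the remote end, which calls tegra_ivc_notified() on its side):
 *
 *     tegra_ivc_reset(chan);
 *
 *     // on each notification received, until the channel is up:
 *     while (tegra_ivc_notified(chan) == -EAGAIN)
 *         wait_for_next_notification();
 *
 * "wait_for_next_notification()" is a hypothetical stand-in for the
 * transport-specific doorbell/interrupt wait.
 */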

size_t tegra_ivc_align(size_t size)
{
    return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
    if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
        pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
               __func__, queue_size, TEGRA_IVC_ALIGN);
        return 0;
    }

    return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
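
/*
 * Worked example (illustrative): for 16 frames of 128 bytes each,
 * queue_size = 16 * 128 = 2048 bytes, and the total allocation per
 * direction is 2048 + sizeof(struct tegra_ivc_header) = 2048 + 128 =
 * 2176 bytes, since the header occupies two 64-byte cache lines.
 */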

static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
                                  unsigned int num_frames, size_t frame_size)
{
    BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
                             TEGRA_IVC_ALIGN));
    BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
                             TEGRA_IVC_ALIGN));
    BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
                             TEGRA_IVC_ALIGN));

    if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
        pr_err("num_frames * frame_size overflows\n");
        return -EINVAL;
    }

    if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
        pr_err("frame size not adequately aligned: %zu\n", frame_size);
        return -EINVAL;
    }

    /*
     * The headers must at least be aligned enough for counters
     * to be accessed atomically.
     */
    if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
        pr_err("IVC channel start not aligned: %#lx\n", rx);
        return -EINVAL;
    }

    if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
        pr_err("IVC channel start not aligned: %#lx\n", tx);
        return -EINVAL;
    }

    if (rx < tx) {
        if (rx + frame_size * num_frames > tx) {
            pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
                   rx, frame_size * num_frames, tx);
            return -EINVAL;
        }
    } else {
        if (tx + frame_size * num_frames > rx) {
            pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
                   tx, frame_size * num_frames, rx);
            return -EINVAL;
        }
    }

    return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
                   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
                   unsigned int num_frames, size_t frame_size,
                   void (*notify)(struct tegra_ivc *ivc, void *data),
                   void *data)
{
    size_t queue_size;
    int err;

    if (WARN_ON(!ivc || !notify))
        return -EINVAL;

    /*
     * All sizes that can be returned by communication functions should
     * fit in an int.
     */
    if (frame_size > INT_MAX)
        return -E2BIG;

    err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
                                 num_frames, frame_size);
    if (err < 0)
        return err;

    queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

    if (peer) {
        ivc->rx.phys = dma_map_single(peer, rx, queue_size,
                                      DMA_BIDIRECTIONAL);
        if (dma_mapping_error(peer, ivc->rx.phys))
            return -ENOMEM;

        ivc->tx.phys = dma_map_single(peer, tx, queue_size,
                                      DMA_BIDIRECTIONAL);
        if (dma_mapping_error(peer, ivc->tx.phys)) {
            dma_unmap_single(peer, ivc->rx.phys, queue_size,
                             DMA_BIDIRECTIONAL);
            return -ENOMEM;
        }
    } else {
        ivc->rx.phys = rx_phys;
        ivc->tx.phys = tx_phys;
    }

    ivc->rx.channel = rx;
    ivc->tx.channel = tx;
    ivc->peer = peer;
    ivc->notify = notify;
    ivc->notify_data = data;
    ivc->frame_size = frame_size;
    ivc->num_frames = num_frames;

    /*
     * These values aren't necessarily correct until the channel has been
     * reset.
     */
    ivc->tx.position = 0;
    ivc->rx.position = 0;

    return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
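
/*
 * Initialization sketch (illustrative; "rx_buf"/"tx_buf" are
 * hypothetical cache-line-aligned buffers of
 * tegra_ivc_total_queue_size(16 * 128) bytes each, and "my_notify" a
 * caller-supplied doorbell function):
 *
 *     err = tegra_ivc_init(&chan, peer_dev, rx_buf, 0, tx_buf, 0,
 *                          16, 128, my_notify, NULL);
 *     if (err < 0)
 *         return err;
 *
 *     tegra_ivc_reset(&chan);
 *
 * When a "peer" device is given, the buffers are DMA-mapped and the
 * rx_phys/tx_phys arguments are ignored.
 */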

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
    if (ivc->peer) {
        size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
                                                 ivc->frame_size);

        dma_unmap_single(ivc->peer, ivc->rx.phys, size,
                         DMA_BIDIRECTIONAL);
        dma_unmap_single(ivc->peer, ivc->tx.phys, size,
                         DMA_BIDIRECTIONAL);
    }
}
EXPORT_SYMBOL(tegra_ivc_cleanup);