Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Header file for dma buffer sharing framework.
0004  *
0005  * Copyright(C) 2011 Linaro Limited. All rights reserved.
0006  * Author: Sumit Semwal <sumit.semwal@ti.com>
0007  *
0008  * Many thanks to linaro-mm-sig list, and specially
0009  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
0010  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
0011  * refining of this idea.
0012  */
0013 #ifndef __DMA_BUF_H__
0014 #define __DMA_BUF_H__
0015 
0016 #include <linux/iosys-map.h>
0017 #include <linux/file.h>
0018 #include <linux/err.h>
0019 #include <linux/scatterlist.h>
0020 #include <linux/list.h>
0021 #include <linux/dma-mapping.h>
0022 #include <linux/fs.h>
0023 #include <linux/dma-fence.h>
0024 #include <linux/wait.h>
0025 
0026 struct device;
0027 struct dma_buf;
0028 struct dma_buf_attachment;
0029 
0030 /**
0031  * struct dma_buf_ops - operations possible on struct dma_buf
0032  * @vmap: [optional] creates a virtual mapping for the buffer into kernel
0033  *    address space. Same restrictions as for vmap and friends apply.
0034  * @vunmap: [optional] unmaps a vmap from the buffer
0035  */
0036 struct dma_buf_ops {
0037     /**
0038       * @cache_sgt_mapping:
0039       *
0040       * If true the framework will cache the first mapping made for each
0041       * attachment. This avoids creating mappings for attachments multiple
0042       * times.
0043       */
0044     bool cache_sgt_mapping;
0045 
0046     /**
0047      * @attach:
0048      *
0049      * This is called from dma_buf_attach() to make sure that a given
0050      * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
0051      * which support buffer objects in special locations like VRAM or
0052      * device-specific carveout areas should check whether the buffer could
0053      * be moved to system memory (or directly accessed by the provided
0054      * device), and otherwise need to fail the attach operation.
0055      *
0056      * The exporter should also in general check whether the current
0057      * allocation fulfills the DMA constraints of the new device. If this
0058      * is not the case, and the allocation cannot be moved, it should also
0059      * fail the attach operation.
0060      *
0061      * Any exporter-private housekeeping data can be stored in the
0062      * &dma_buf_attachment.priv pointer.
0063      *
0064      * This callback is optional.
0065      *
0066      * Returns:
0067      *
0068      * 0 on success, negative error code on failure. It might return -EBUSY
0069      * to signal that backing storage is already allocated and incompatible
0070      * with the requirements of requesting device.
0071      */
0072     int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
0073 
0074     /**
0075      * @detach:
0076      *
0077      * This is called by dma_buf_detach() to release a &dma_buf_attachment.
0078      * Provided so that exporters can clean up any housekeeping for an
0079      * &dma_buf_attachment.
0080      *
0081      * This callback is optional.
0082      */
0083     void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
0084 
0085     /**
0086      * @pin:
0087      *
0088      * This is called by dma_buf_pin() and lets the exporter know that the
0089      * DMA-buf can't be moved any more. Ideally, the exporter should
0090      * pin the buffer so that it is generally accessible by all
0091      * devices.
0092      *
0093      * This is called with the &dmabuf.resv object locked and is mutually
0094      * exclusive with @cache_sgt_mapping.
0095      *
0096      * This is called automatically for non-dynamic importers from
0097      * dma_buf_attach().
0098      *
0099      * Note that similar to non-dynamic exporters in their @map_dma_buf
0100      * callback the driver must guarantee that the memory is available for
0101      * use and cleared of any old data by the time this function returns.
0102      * Drivers which pipeline their buffer moves internally must wait for
0103      * all moves and clears to complete.
0104      *
0105      * Returns:
0106      *
0107      * 0 on success, negative error code on failure.
0108      */
0109     int (*pin)(struct dma_buf_attachment *attach);
0110 
0111     /**
0112      * @unpin:
0113      *
0114      * This is called by dma_buf_unpin() and lets the exporter know that the
0115      * DMA-buf can be moved again.
0116      *
0117      * This is called with the dmabuf->resv object locked and is mutually
0118      * exclusive with @cache_sgt_mapping.
0119      *
0120      * This callback is optional.
0121      */
0122     void (*unpin)(struct dma_buf_attachment *attach);
0123 
0124     /**
0125      * @map_dma_buf:
0126      *
0127      * This is called by dma_buf_map_attachment() and is used to map a
0128      * shared &dma_buf into device address space, and it is mandatory. It
0129      * can only be called if @attach has been called successfully.
0130      *
0131      * This call may sleep, e.g. when the backing storage first needs to be
0132      * allocated, or moved to a location suitable for all currently attached
0133      * devices.
0134      *
0135      * Note that any specific buffer attributes required for this function
0136      * should get added to device_dma_parameters accessible via
0137      * &device.dma_params from the &dma_buf_attachment. The @attach callback
0138      * should also check these constraints.
0139      *
0140      * If this is being called for the first time, the exporter can now
0141      * choose to scan through the list of attachments for this buffer,
0142      * collate the requirements of the attached devices, and choose an
0143      * appropriate backing storage for the buffer.
0144      *
0145      * Based on enum dma_data_direction, it might be possible to have
0146      * multiple users accessing at the same time (for reading, maybe), or
0147      * any other kind of sharing that the exporter might wish to make
0148      * available to buffer-users.
0149      *
0150      * This is always called with the dmabuf->resv object locked when
0151      * the dynamic_mapping flag is true.
0152      *
0153      * Note that for non-dynamic exporters the driver must guarantee that
0154      * the memory is available for use and cleared of any old data by
0155      * the time this function returns.  Drivers which pipeline their buffer
0156      * moves internally must wait for all moves and clears to complete.
0157      * Dynamic exporters do not need to follow this rule: For non-dynamic
0158      * importers the buffer is already pinned through @pin, which has the
0159      * same requirements. Dynamic importers otoh are required to obey the
0160      * dma_resv fences.
0161      *
0162      * Returns:
0163      *
0164      * A &sg_table scatter list of the backing storage of the DMA buffer,
0165      * already mapped into the device address space of the &device attached
0166      * with the provided &dma_buf_attachment. The addresses and lengths in
0167      * the scatter list are PAGE_SIZE aligned.
0168      *
0169      * On failure, returns a negative error value wrapped into a pointer.
0170      * May also return -EINTR when a signal was received while being
0171      * blocked.
0172      *
0173      * Note that exporters should not try to cache the scatter list, or
0174      * return the same one for multiple calls. Caching is done either by the
0175      * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
0176      * of the scatter list is transferred to the caller, and returned by
0177      * @unmap_dma_buf.
0178      */
0179     struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
0180                      enum dma_data_direction);
0181     /**
0182      * @unmap_dma_buf:
0183      *
0184      * This is called by dma_buf_unmap_attachment() and should unmap and
0185      * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
0186      * For static dma_buf handling this might also unpin the backing
0187      * storage if this is the last mapping of the DMA buffer.
0188      */
0189     void (*unmap_dma_buf)(struct dma_buf_attachment *,
0190                   struct sg_table *,
0191                   enum dma_data_direction);
0192 
0193     /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
0194      * if the call would block.
0195      */
0196 
0197     /**
0198      * @release:
0199      *
0200      * Called after the last dma_buf_put to release the &dma_buf, and
0201      * mandatory.
0202      */
0203     void (*release)(struct dma_buf *);
0204 
0205     /**
0206      * @begin_cpu_access:
0207      *
0208      * This is called from dma_buf_begin_cpu_access() and allows the
0209      * exporter to ensure that the memory is actually coherent for cpu
0210      * access. The exporter also needs to ensure that cpu access is coherent
0211      * for the access direction. The direction can be used by the exporter
0212      * to optimize the cache flushing, i.e. access with a different
0213      * direction (read instead of write) might return stale or even bogus
0214      * data (e.g. when the exporter needs to copy the data to temporary
0215      * storage).
0216      *
0217      * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
0218      * command for userspace mappings established through @mmap, and also
0219      * for kernel mappings established with @vmap.
0220      *
0221      * This callback is optional.
0222      *
0223      * Returns:
0224      *
0225      * 0 on success or a negative error code on failure. This can for
0226      * example fail when the backing storage can't be allocated. Can also
0227      * return -ERESTARTSYS or -EINTR when the call has been interrupted and
0228      * needs to be restarted.
0229      */
0230     int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
0231 
0232     /**
0233      * @end_cpu_access:
0234      *
0235      * This is called from dma_buf_end_cpu_access() when the importer is
0236      * done accessing the CPU. The exporter can use this to flush caches and
0237      * undo anything else done in @begin_cpu_access.
0238      *
0239      * This callback is optional.
0240      *
0241      * Returns:
0242      *
0243      * 0 on success or a negative error code on failure. Can return
0244      * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
0245      * to be restarted.
0246      */
0247     int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
0248 
0249     /**
0250      * @mmap:
0251      *
0252      * This callback is used by the dma_buf_mmap() function
0253      *
0254      * Note that the mapping needs to be incoherent, userspace is expected
0255      * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
0256      *
0257      * Because dma-buf buffers have invariant size over their lifetime, the
0258      * dma-buf core checks whether a vma is too large and rejects such
0259      * mappings. The exporter hence does not need to duplicate this check.
0260      * Drivers do not need to check this themselves.
0261      *
0262      * If an exporter needs to manually flush caches and hence needs to fake
0263      * coherency for mmap support, it needs to be able to zap all the ptes
0264      * pointing at the backing storage. Now linux mm needs a struct
0265      * address_space associated with the struct file stored in vma->vm_file
0266      * to do that with the function unmap_mapping_range. But the dma_buf
0267      * framework only backs every dma_buf fd with the anon_file struct file,
0268      * i.e. all dma_bufs share the same file.
0269      *
0270      * Hence exporters need to setup their own file (and address_space)
0271      * association by setting vma->vm_file and adjusting vma->vm_pgoff in
0272      * the dma_buf mmap callback. In the specific case of a gem driver the
0273      * exporter could use the shmem file already provided by gem (and set
0274      * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
0275      * corresponding range of the struct address_space associated with their
0276      * own file.
0277      *
0278      * This callback is optional.
0279      *
0280      * Returns:
0281      *
0282      * 0 on success or a negative error code on failure.
0283      */
0284     int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
0285 
0286     int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
0287     void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
0288 };
0289 
0290 /**
0291  * struct dma_buf - shared buffer object
0292  *
0293  * This represents a shared buffer, created by calling dma_buf_export(). The
0294  * userspace representation is a normal file descriptor, which can be created by
0295  * calling dma_buf_fd().
0296  *
0297  * Shared dma buffers are reference counted using dma_buf_put() and
0298  * get_dma_buf().
0299  *
0300  * Device DMA access is handled by the separate &struct dma_buf_attachment.
0301  */
0302 struct dma_buf {
0303     /**
0304      * @size:
0305      *
0306      * Size of the buffer; invariant over the lifetime of the buffer.
0307      */
0308     size_t size;
0309 
0310     /**
0311      * @file:
0312      *
0313      * File pointer used for sharing buffers across, and for refcounting.
0314      * See dma_buf_get() and dma_buf_put().
0315      */
0316     struct file *file;
0317 
0318     /**
0319      * @attachments:
0320      *
0321      * List of dma_buf_attachment that denotes all devices attached,
0322      * protected by &dma_resv lock @resv.
0323      */
0324     struct list_head attachments;
0325 
0326     /** @ops: dma_buf_ops associated with this buffer object. */
0327     const struct dma_buf_ops *ops;
0328 
0329     /**
0330      * @lock:
0331      *
0332      * Used internally to serialize list manipulation, attach/detach and
0333      * vmap/unmap. Note that in many cases this is superseded by
0334      * dma_resv_lock() on @resv.
0335      */
0336     struct mutex lock;
0337 
0338     /**
0339      * @vmapping_counter:
0340      *
0341      * Used internally to refcnt the vmaps returned by dma_buf_vmap().
0342      * Protected by @lock.
0343      */
0344     unsigned vmapping_counter;
0345 
0346     /**
0347      * @vmap_ptr:
0348      * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
0349      */
0350     struct iosys_map vmap_ptr;
0351 
0352     /**
0353      * @exp_name:
0354      *
0355      * Name of the exporter; useful for debugging. See the
0356      * DMA_BUF_SET_NAME IOCTL.
0357      */
0358     const char *exp_name;
0359 
0360     /**
0361      * @name:
0362      *
0363      * Userspace-provided name; useful for accounting and debugging,
0364      * protected by dma_resv_lock() on @resv and @name_lock for read access.
0365      */
0366     const char *name;
0367 
0368     /** @name_lock: Spinlock to protect name access for read access. */
0369     spinlock_t name_lock;
0370 
0371     /**
0372      * @owner:
0373      *
0374      * Pointer to exporter module; used for refcounting when exporter is a
0375      * kernel module.
0376      */
0377     struct module *owner;
0378 
0379     /** @list_node: node for dma_buf accounting and debugging. */
0380     struct list_head list_node;
0381 
0382     /** @priv: exporter specific private data for this buffer object. */
0383     void *priv;
0384 
0385     /**
0386      * @resv:
0387      *
0388      * Reservation object linked to this dma-buf.
0389      *
0390      * IMPLICIT SYNCHRONIZATION RULES:
0391      *
0392      * Drivers which support implicit synchronization of buffer access as
0393      * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
0394      * below rules.
0395      *
0396      * - Drivers must add a read fence through dma_resv_add_fence() with the
0397      *   DMA_RESV_USAGE_READ flag for anything the userspace API considers a
0398      *   read access. This highly depends upon the API and window system.
0399      *
0400      * - Similarly drivers must add a write fence through
0401      *   dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
0402      *   anything the userspace API considers write access.
0403      *
0404      * - Drivers may just always add a write fence, since that only
0405      *   causes unnecessary synchronization, but no correctness issues.
0406      *
0407      * - Some drivers only expose a synchronous userspace API with no
0408      *   pipelining across drivers. These do not set any fences for their
0409      *   access. An example here is v4l.
0410      *
0411      * - Driver should use dma_resv_usage_rw() when retrieving fences as
0412      *   dependency for implicit synchronization.
0413      *
0414      * DYNAMIC IMPORTER RULES:
0415      *
0416      * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
0417      * additional constraints on how they set up fences:
0418      *
0419      * - Dynamic importers must obey the write fences and wait for them to
0420      *   signal before allowing access to the buffer's underlying storage
0421      *   through the device.
0422      *
0423      * - Dynamic importers should set fences for any access that they can't
0424      *   disable immediately from their &dma_buf_attach_ops.move_notify
0425      *   callback.
0426      *
0427      * IMPORTANT:
0428      *
0429      * All drivers and memory management related functions must obey the
0430      * struct dma_resv rules, specifically the rules for updating and
0431      * obeying fences. See enum dma_resv_usage for further descriptions.
0432      */
0433     struct dma_resv *resv;
0434 
0435     /** @poll: for userspace poll support */
0436     wait_queue_head_t poll;
0437 
0438     /** @cb_in: for userspace poll support */
0439     /** @cb_out: for userspace poll support */
0440     struct dma_buf_poll_cb_t {
0441         struct dma_fence_cb cb;
0442         wait_queue_head_t *poll;
0443 
0444         __poll_t active;
0445     } cb_in, cb_out;
0446 #ifdef CONFIG_DMABUF_SYSFS_STATS
0447     /**
0448      * @sysfs_entry:
0449      *
0450      * For exposing information about this buffer in sysfs. See also
0451      * `DMA-BUF statistics`_ for the uapi this enables.
0452      */
0453     struct dma_buf_sysfs_entry {
0454         struct kobject kobj;
0455         struct dma_buf *dmabuf;
0456     } *sysfs_entry;
0457 #endif
0458 };
0459 
0460 /**
0461  * struct dma_buf_attach_ops - importer operations for an attachment
0462  *
0463  * Attachment operations implemented by the importer.
0464  */
0465 struct dma_buf_attach_ops {
0466     /**
0467      * @allow_peer2peer:
0468      *
0469      * If this is set to true the importer must be able to handle peer
0470      * resources without struct pages.
0471      */
0472     bool allow_peer2peer;
0473 
0474     /**
0475      * @move_notify: [optional] notification that the DMA-buf is moving
0476      *
0477      * If this callback is provided the framework can avoid pinning the
0478      * backing store while mappings exist.
0479      *
0480      * This callback is called with the lock of the reservation object
0481      * associated with the dma_buf held and the mapping function must be
0482      * called with this lock held as well. This makes sure that no mapping
0483      * is created concurrently with an ongoing move operation.
0484      *
0485      * Mappings stay valid and are not directly affected by this callback.
0486      * But the DMA-buf can now be in a different physical location, so all
0487      * mappings should be destroyed and re-created as soon as possible.
0488      *
0489      * New mappings can be created after this callback returns, and will
0490      * point to the new location of the DMA-buf.
0491      */
0492     void (*move_notify)(struct dma_buf_attachment *attach);
0493 };
0494 
0495 /**
0496  * struct dma_buf_attachment - holds device-buffer attachment data
0497  * @dmabuf: buffer for this attachment.
0498  * @dev: device attached to the buffer.
0499  * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
0500  * @sgt: cached mapping.
0501  * @dir: direction of cached mapping.
0502  * @peer2peer: true if the importer can handle peer resources without pages.
0503  * @priv: exporter specific attachment data.
0504  * @importer_ops: importer operations for this attachment, if provided
0505  * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
0506  * @importer_priv: importer specific attachment data.
0507  *
0508  * This structure holds the attachment information between the dma_buf buffer
0509  * and its user device(s). The list contains one attachment struct per device
0510  * attached to the buffer.
0511  *
0512  * An attachment is created by calling dma_buf_attach(), and released again by
0513  * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
0514  * transfer is created by dma_buf_map_attachment() and freed again by calling
0515  * dma_buf_unmap_attachment().
0516  */
0517 struct dma_buf_attachment {
0518     struct dma_buf *dmabuf;
0519     struct device *dev;
0520     struct list_head node;
0521     struct sg_table *sgt;
0522     enum dma_data_direction dir;
0523     bool peer2peer;
0524     const struct dma_buf_attach_ops *importer_ops;
0525     void *importer_priv;
0526     void *priv;
0527 };
0528 
0529 /**
0530  * struct dma_buf_export_info - holds information needed to export a dma_buf
0531  * @exp_name:   name of the exporter - useful for debugging.
0532  * @owner:  pointer to exporter module - used for refcounting kernel module
0533  * @ops:    Attach allocator-defined dma buf ops to the new buffer
0534  * @size:   Size of the buffer - invariant over the lifetime of the buffer
0535  * @flags:  mode flags for the file
0536  * @resv:   reservation-object, NULL to allocate default one
0537  * @priv:   Attach private data of allocator to this buffer
0538  *
0539  * This structure holds the information required to export the buffer. Used
0540  * with dma_buf_export() only.
0541  */
0542 struct dma_buf_export_info {
0543     const char *exp_name;
0544     struct module *owner;
0545     const struct dma_buf_ops *ops;
0546     size_t size;
0547     int flags;
0548     struct dma_resv *resv;
0549     void *priv;
0550 };
0551 
0552 /**
0553  * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
0554  * @name: export-info name
0555  *
0556  * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
0557  * zeroes it out and pre-populates exp_name in it.
0558  */
0559 #define DEFINE_DMA_BUF_EXPORT_INFO(name)    \
0560     struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
0561                      .owner = THIS_MODULE }
0562 
0563 /**
0564  * get_dma_buf - convenience wrapper for get_file.
0565  * @dmabuf: [in]    pointer to dma_buf
0566  *
0567  * Increments the reference count on the dma-buf, needed in case of drivers
0568  * that either need to create additional references to the dmabuf on the
0569  * kernel side.  For example, an exporter that needs to keep a dmabuf ptr
0570  * so that subsequent exports don't create a new dmabuf.
0571  */
0572 static inline void get_dma_buf(struct dma_buf *dmabuf)
0573 {
0574     get_file(dmabuf->file);
0575 }
0576 
0577 /**
0578  * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
0579  * @dmabuf: the DMA-buf to check
0580  *
0581  * Returns true if a DMA-buf exporter wants to be called with the dma_resv
0582  * locked for the map/unmap callbacks, false if it doesn't want to be called
0583  * with the lock held.
0584  */
0585 static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
0586 {
0587     return !!dmabuf->ops->pin;
0588 }
0589 
0590 /**
0591  * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
0592  * mappings
0593  * @attach: the DMA-buf attachment to check
0594  *
0595  * Returns true if a DMA-buf importer wants to call the map/unmap functions with
0596  * the dma_resv lock held.
0597  */
0598 static inline bool
0599 dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
0600 {
0601     return !!attach->importer_ops;
0602 }
0603 
0604 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
0605                       struct device *dev);
0606 struct dma_buf_attachment *
0607 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
0608                const struct dma_buf_attach_ops *importer_ops,
0609                void *importer_priv);
0610 void dma_buf_detach(struct dma_buf *dmabuf,
0611             struct dma_buf_attachment *attach);
0612 int dma_buf_pin(struct dma_buf_attachment *attach);
0613 void dma_buf_unpin(struct dma_buf_attachment *attach);
0614 
0615 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
0616 
0617 int dma_buf_fd(struct dma_buf *dmabuf, int flags);
0618 struct dma_buf *dma_buf_get(int fd);
0619 void dma_buf_put(struct dma_buf *dmabuf);
0620 
0621 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
0622                     enum dma_data_direction);
0623 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
0624                 enum dma_data_direction);
0625 void dma_buf_move_notify(struct dma_buf *dma_buf);
0626 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
0627                  enum dma_data_direction dir);
0628 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
0629                enum dma_data_direction dir);
0630 
0631 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
0632          unsigned long);
0633 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
0634 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
0635 #endif /* __DMA_BUF_H__ */