Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_SCATTERLIST_H
0003 #define _LINUX_SCATTERLIST_H
0004 
0005 #include <linux/string.h>
0006 #include <linux/types.h>
0007 #include <linux/bug.h>
0008 #include <linux/mm.h>
0009 #include <asm/io.h>
0010 
/*
 * One scatter/gather entry: a contiguous chunk of a buffer described by
 * a page, an offset into that page, and a length.  The low bits of
 * page_link encode list structure (see the SG table design notes below).
 */
struct scatterlist {
	unsigned long	page_link;	/* page pointer | SG_CHAIN/SG_END bits */
	unsigned int	offset;		/* offset of the data within the page */
	unsigned int	length;		/* length of the data in bytes */
	dma_addr_t	dma_address;	/* bus address, valid after dma_map_sg() */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	unsigned int	dma_length;	/* mapped length, returned by sg_dma_len() */
#endif
#ifdef CONFIG_PCI_P2PDMA
	unsigned int	dma_flags;	/* flag bits, e.g. SG_DMA_BUS_ADDRESS */
#endif
};
0023 
/*
 * These macros should be used after a dma_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries dma_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)	((sg)->dma_address)

/* Mapped length may differ from sg->length on IOMMU-merging platforms. */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg)		((sg)->dma_length)
#else
#define sg_dma_len(sg)		((sg)->length)
#endif
0038 
/*
 * A scatterlist table: the list head plus bookkeeping for how many
 * entries are populated (orig_nents) vs. DMA-mapped (nents).
 */
struct sg_table {
	struct scatterlist *sgl;	/* the list */
	unsigned int nents;		/* number of mapped entries */
	unsigned int orig_nents;	/* original size of list */
};

/*
 * State for incrementally building an sg_table with
 * sg_alloc_append_table_from_pages().
 */
struct sg_append_table {
	struct sg_table sgt;		/* The scatter list table */
	struct scatterlist *prv;	/* last populated sge in the table */
	unsigned int total_nents;	/* Total entries in the table */
};
0050 
/*
 * Notes on SG table design.
 *
 * We use the unsigned long page_link field in the scatterlist struct to place
 * the page pointer AND encode information about the sg table as well. The two
 * lower bits are reserved for this information.
 *
 * If bit 0 is set, then the page_link contains a pointer to the next sg
 * table list. Otherwise the next entry is at sg + 1.
 *
 * If bit 1 is set, then this sg entry is the last element in a list.
 *
 * See sg_next().
 *
 */

#define SG_CHAIN	0x01UL	/* entry chains to another scatterlist */
#define SG_END		0x02UL	/* entry terminates the list */

/*
 * We overload the LSB of the page pointer to indicate whether it's
 * a valid sg entry, or whether it points to the start of a new scatterlist.
 * Those low bits are there for everyone! (thanks mason :-)
 */
#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
0076 
0077 static inline unsigned int __sg_flags(struct scatterlist *sg)
0078 {
0079     return sg->page_link & SG_PAGE_LINK_MASK;
0080 }
0081 
0082 static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
0083 {
0084     return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
0085 }
0086 
0087 static inline bool sg_is_chain(struct scatterlist *sg)
0088 {
0089     return __sg_flags(sg) & SG_CHAIN;
0090 }
0091 
0092 static inline bool sg_is_last(struct scatterlist *sg)
0093 {
0094     return __sg_flags(sg) & SG_END;
0095 }
0096 
0097 /**
0098  * sg_assign_page - Assign a given page to an SG entry
0099  * @sg:         SG entry
0100  * @page:       The page
0101  *
0102  * Description:
0103  *   Assign page to sg entry. Also see sg_set_page(), the most commonly used
0104  *   variant.
0105  *
0106  **/
0107 static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
0108 {
0109     unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END);
0110 
0111     /*
0112      * In order for the low bit stealing approach to work, pages
0113      * must be aligned at a 32-bit boundary as a minimum.
0114      */
0115     BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
0116 #ifdef CONFIG_DEBUG_SG
0117     BUG_ON(sg_is_chain(sg));
0118 #endif
0119     sg->page_link = page_link | (unsigned long) page;
0120 }
0121 
0122 /**
0123  * sg_set_page - Set sg entry to point at given page
0124  * @sg:      SG entry
0125  * @page:    The page
0126  * @len:     Length of data
0127  * @offset:  Offset into page
0128  *
0129  * Description:
0130  *   Use this function to set an sg entry pointing at a page, never assign
0131  *   the page directly. We encode sg table information in the lower bits
0132  *   of the page pointer. See sg_page() for looking up the page belonging
0133  *   to an sg entry.
0134  *
0135  **/
0136 static inline void sg_set_page(struct scatterlist *sg, struct page *page,
0137                    unsigned int len, unsigned int offset)
0138 {
0139     sg_assign_page(sg, page);
0140     sg->offset = offset;
0141     sg->length = len;
0142 }
0143 
0144 static inline struct page *sg_page(struct scatterlist *sg)
0145 {
0146 #ifdef CONFIG_DEBUG_SG
0147     BUG_ON(sg_is_chain(sg));
0148 #endif
0149     return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
0150 }
0151 
/**
 * sg_set_buf - Set sg entry to point at given data
 * @sg:      SG entry
 * @buf:     Data
 * @buflen:  Data length
 *
 * Description:
 *   Translates a kernel virtual address into its page and in-page offset
 *   and stores them in @sg.  @buf must lie in the linear mapping (the
 *   virt_addr_valid() check below); it cannot be vmalloc/highmem memory.
 *
 **/
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
			      unsigned int buflen)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(!virt_addr_valid(buf));
#endif
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
0167 
/*
 * Loop over each sg element, following the pointer to a new list if necessary
 * (sg_next() transparently hops across chain entries).
 */
#define for_each_sg(sglist, sg, nr, __i)	\
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

/*
 * Loop over each sg element in the given sg_table object.
 */
#define for_each_sgtable_sg(sgt, sg, i)		\
	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)

/*
 * Loop over each sg element in the given *DMA mapped* sg_table object.
 * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses
 * of each element.
 */
#define for_each_sgtable_dma_sg(sgt, sg, i)	\
	for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
0187 
0188 static inline void __sg_chain(struct scatterlist *chain_sg,
0189                   struct scatterlist *sgl)
0190 {
0191     /*
0192      * offset and length are unused for chain entry. Clear them.
0193      */
0194     chain_sg->offset = 0;
0195     chain_sg->length = 0;
0196 
0197     /*
0198      * Set lowest bit to indicate a link pointer, and make sure to clear
0199      * the termination bit if it happens to be set.
0200      */
0201     chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END;
0202 }
0203 
0204 /**
0205  * sg_chain - Chain two sglists together
0206  * @prv:    First scatterlist
0207  * @prv_nents:  Number of entries in prv
0208  * @sgl:    Second scatterlist
0209  *
0210  * Description:
0211  *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
0212  *
0213  **/
0214 static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
0215                 struct scatterlist *sgl)
0216 {
0217     __sg_chain(&prv[prv_nents - 1], sgl);
0218 }
0219 
0220 /**
0221  * sg_mark_end - Mark the end of the scatterlist
0222  * @sg:      SG entryScatterlist
0223  *
0224  * Description:
0225  *   Marks the passed in sg entry as the termination point for the sg
0226  *   table. A call to sg_next() on this entry will return NULL.
0227  *
0228  **/
0229 static inline void sg_mark_end(struct scatterlist *sg)
0230 {
0231     /*
0232      * Set termination bit, clear potential chain bit
0233      */
0234     sg->page_link |= SG_END;
0235     sg->page_link &= ~SG_CHAIN;
0236 }
0237 
0238 /**
0239  * sg_unmark_end - Undo setting the end of the scatterlist
0240  * @sg:      SG entryScatterlist
0241  *
0242  * Description:
0243  *   Removes the termination marker from the given entry of the scatterlist.
0244  *
0245  **/
0246 static inline void sg_unmark_end(struct scatterlist *sg)
0247 {
0248     sg->page_link &= ~SG_END;
0249 }
0250 
/*
 * CONFIG_PCI_P2PDMA depends on CONFIG_64BIT which means there are 4 bytes
 * of padding in struct scatterlist (assuming also CONFIG_NEED_SG_DMA_LENGTH
 * is set). Use this padding for DMA flags bits to indicate when a specific
 * dma address is a bus address.
 */
0257 #ifdef CONFIG_PCI_P2PDMA
0258 
0259 #define SG_DMA_BUS_ADDRESS (1 << 0)
0260 
/**
 * sg_is_dma_bus_address - Return whether a given segment was marked
 *			   as a bus address
 * @sg:	     SG entry
 *
 * Description:
 *   Returns true if sg_dma_mark_bus_address() has been called on
 *   this segment.
 **/
static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
{
	return sg->dma_flags & SG_DMA_BUS_ADDRESS;
}
0274 
/**
 * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address
 * @sg:	     SG entry
 *
 * Description:
 *   Marks the passed in sg entry to indicate that the dma_address is
 *   a bus address and doesn't need to be unmapped. This should only be
 *   used by dma_map_sg() implementations to mark bus addresses
 *   so they can be properly cleaned up in dma_unmap_sg().
 **/
static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
{
	sg->dma_flags |= SG_DMA_BUS_ADDRESS;
}
0289 
/**
 * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
 * @sg:	     SG entry
 *
 * Description:
 *   Clears the bus address mark.
 **/
static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
{
	sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;
}
0301 
0302 #else
0303 
/* Stubs: without CONFIG_PCI_P2PDMA no segment is ever a bus address. */
static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
{
	return false;
}
static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
{
}
static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
{
}
0314 
0315 #endif
0316 
0317 /**
0318  * sg_phys - Return physical address of an sg entry
0319  * @sg:      SG entry
0320  *
0321  * Description:
0322  *   This calls page_to_phys() on the page in this sg entry, and adds the
0323  *   sg offset. The caller must know that it is legal to call page_to_phys()
0324  *   on the sg page.
0325  *
0326  **/
0327 static inline dma_addr_t sg_phys(struct scatterlist *sg)
0328 {
0329     return page_to_phys(sg_page(sg)) + sg->offset;
0330 }
0331 
0332 /**
0333  * sg_virt - Return virtual address of an sg entry
0334  * @sg:      SG entry
0335  *
0336  * Description:
0337  *   This calls page_address() on the page in this sg entry, and adds the
0338  *   sg offset. The caller must know that the sg page has a valid virtual
0339  *   mapping.
0340  *
0341  **/
0342 static inline void *sg_virt(struct scatterlist *sg)
0343 {
0344     return page_address(sg_page(sg)) + sg->offset;
0345 }
0346 
0347 /**
0348  * sg_init_marker - Initialize markers in sg table
0349  * @sgl:       The SG table
0350  * @nents:     Number of entries in table
0351  *
0352  **/
0353 static inline void sg_init_marker(struct scatterlist *sgl,
0354                   unsigned int nents)
0355 {
0356     sg_mark_end(&sgl[nents - 1]);
0357 }
0358 
/* Walking / counting / initialisation primitives. */
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
/* Split a mapped scatterlist into @nb_splits new lists of the given sizes. */
int sg_split(struct scatterlist *in, const int in_mapped_nents,
	     const off_t skip, const int nb_splits,
	     const size_t *split_sizes,
	     struct scatterlist **out, int *out_mapped_nents,
	     gfp_t gfp_mask);

/* Allocator hooks taken by __sg_alloc_table()/__sg_free_table(). */
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);

/* Table allocation and teardown. */
void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
		     sg_free_fn *, unsigned int);
void sg_free_table(struct sg_table *);
void sg_free_append_table(struct sg_append_table *sgt);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
		     struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_append_table_from_pages(struct sg_append_table *sgt,
				     struct page **pages, unsigned int n_pages,
				     unsigned int offset, unsigned long size,
				     unsigned int max_segment,
				     unsigned int left_pages, gfp_t gfp_mask);
int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
				      unsigned int n_pages, unsigned int offset,
				      unsigned long size,
				      unsigned int max_segment, gfp_t gfp_mask);
0390 
0391 /**
0392  * sg_alloc_table_from_pages - Allocate and initialize an sg table from
0393  *                 an array of pages
0394  * @sgt:     The sg table header to use
0395  * @pages:   Pointer to an array of page pointers
0396  * @n_pages:     Number of pages in the pages array
0397  * @offset:      Offset from start of the first page to the start of a buffer
0398  * @size:        Number of valid bytes in the buffer (after offset)
0399  * @gfp_mask:    GFP allocation mask
0400  *
0401  *  Description:
0402  *    Allocate and initialize an sg table from a list of pages. Contiguous
0403  *    ranges of the pages are squashed into a single scatterlist node. A user
0404  *    may provide an offset at a start and a size of valid data in a buffer
0405  *    specified by the page array. The returned sg table is released by
0406  *    sg_free_table.
0407  *
0408  * Returns:
0409  *   0 on success, negative error on failure
0410  */
0411 static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
0412                         struct page **pages,
0413                         unsigned int n_pages,
0414                         unsigned int offset,
0415                         unsigned long size, gfp_t gfp_mask)
0416 {
0417     return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
0418                          size, UINT_MAX, gfp_mask);
0419 }
0420 
#ifdef CONFIG_SGL_ALLOC
/* Allocation of standalone (table-less) scatterlists. */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p);
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p);
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
void sgl_free_order(struct scatterlist *sgl, int order);
void sgl_free(struct scatterlist *sgl);
#endif /* CONFIG_SGL_ALLOC */

/*
 * Copy between a scatterlist and a linear kernel buffer.  The "p"
 * variants additionally skip @skip bytes of the scatterlist first.
 */
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer);

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen);

size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip);
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip);
0446 
/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC		(PAGE_SIZE / sizeof(struct scatterlist))

/*
 * The maximum number of SG segments that we will put inside a
 * scatterlist (unless chaining is used). Should ideally fit inside a
 * single page, to avoid a higher order allocation.  We could define this
 * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order.  The
 * minimum value is 32.
 */
#define SG_CHUNK_SIZE	128

/*
 * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define SG_MAX_SEGMENTS	SG_CHUNK_SIZE
#else
#define SG_MAX_SEGMENTS	2048
#endif

/* Pool-backed chained table allocation (lib/sg_pool.c interface). */
#ifdef CONFIG_SG_POOL
void sg_free_table_chained(struct sg_table *table,
			   unsigned nents_first_chunk);
int sg_alloc_table_chained(struct sg_table *table, int nents,
			   struct scatterlist *first_chunk,
			   unsigned nents_first_chunk);
#endif
0479 
/*
 * sg page iterator
 *
 * Iterates over sg entries page-by-page.  On each successful iteration, you
 * can call sg_page_iter_page(@piter) to get the current page.
 * @piter->sg will point to the sg holding this page and @piter->sg_pgoffset to
 * the page's page offset within the sg. The iteration will stop either when a
 * maximum number of sg entries was reached or a terminating sg
 * (sg_last(sg) == true) was reached.
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* sg holding the page */
	unsigned int		sg_pgoffset;	/* page offset within the sg */

	/* these are internal states, keep away */
	unsigned int		__nents;	/* remaining sg entries */
	int			__pg_advance;	/* nr pages to advance at the
						 * next step */
};

/*
 * sg page iterator for DMA addresses
 *
 * This is the same as sg_page_iter however you can call
 * sg_page_iter_dma_address(@dma_iter) to get the page's DMA
 * address. sg_page_iter_page() cannot be called on this iterator.
 */
struct sg_dma_page_iter {
	struct sg_page_iter base;
};

/* Advance the iterator; returns false when iteration is finished. */
bool __sg_page_iter_next(struct sg_page_iter *piter);
bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter);
/* Prime an iterator over @nents entries starting @pgoffset pages in. */
void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset);
0516 /**
0517  * sg_page_iter_page - get the current page held by the page iterator
0518  * @piter:  page iterator holding the page
0519  */
0520 static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
0521 {
0522     return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
0523 }
0524 
0525 /**
0526  * sg_page_iter_dma_address - get the dma address of the current page held by
0527  * the page iterator.
0528  * @dma_iter:   page iterator holding the page
0529  */
0530 static inline dma_addr_t
0531 sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
0532 {
0533     return sg_dma_address(dma_iter->base.sg) +
0534            (dma_iter->base.sg_pgoffset << PAGE_SHIFT);
0535 }
0536 
/**
 * for_each_sg_page - iterate over the pages of the given sg list
 * @sglist:	sglist to iterate over
 * @piter:	page iterator to hold current page, sg, sg_pgoffset
 * @nents:	maximum number of sg entries to iterate over
 * @pgoffset:	starting page offset (in pages)
 *
 * Callers may use sg_page_iter_page() to get each page pointer.
 * In each loop it operates on PAGE_SIZE unit.
 */
#define for_each_sg_page(sglist, piter, nents, pgoffset)		   \
	for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
	     __sg_page_iter_next(piter);)

/**
 * for_each_sg_dma_page - iterate over the pages of the given sg list
 * @sglist:	sglist to iterate over
 * @dma_iter:	DMA page iterator to hold current page
 * @dma_nents:	maximum number of sg entries to iterate over, this is the value
 *		returned from dma_map_sg
 * @pgoffset:	starting page offset (in pages)
 *
 * Callers may use sg_page_iter_dma_address() to get each page's DMA address.
 * In each loop it operates on PAGE_SIZE unit.
 */
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset)	       \
	for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents,	       \
				  pgoffset);				       \
	     __sg_page_iter_dma_next(dma_iter);)

/**
 * for_each_sgtable_page - iterate over all pages in the sg_table object
 * @sgt:	sg_table object to iterate over
 * @piter:	page iterator to hold current page
 * @pgoffset:	starting page offset (in pages)
 *
 * Iterates over the all memory pages in the buffer described by
 * a scatterlist stored in the given sg_table object.
 * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit.
 */
#define for_each_sgtable_page(sgt, piter, pgoffset)	\
	for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)

/**
 * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
 * @sgt:	sg_table object to iterate over
 * @dma_iter:	DMA page iterator to hold current page
 * @pgoffset:	starting page offset (in pages)
 *
 * Iterates over the all DMA mapped pages in the buffer described by
 * a scatterlist stored in the given sg_table object.
 * See also for_each_sg_dma_page(). In each loop it operates on PAGE_SIZE
 * unit.
 */
#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset)	\
	for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
0593 
0594 
/*
 * Mapping sg iterator
 *
 * Iterates over sg entries mapping page-by-page.  On each successful
 * iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr.  As
 * long as an iteration is enclosed between start and stop, the user
 * is free to choose control structure and when to stop.
 *
 * @miter->consumed is set to @miter->length on each iteration.  It
 * can be adjusted if the user can't consume all the bytes in one go.
 * Also, a stopped iteration can be resumed by calling next on it.
 * This is useful when iteration needs to release all resources and
 * continue later (e.g. at the next interrupt).
 */

#define SG_MITER_ATOMIC		(1 << 0)	/* use kmap_atomic */
#define SG_MITER_TO_SG		(1 << 1)	/* flush back to phys on unmap */
#define SG_MITER_FROM_SG	(1 << 2)	/* nop */

struct sg_mapping_iter {
	/* the following three fields can be accessed directly */
	struct page		*page;		/* currently mapped page */
	void			*addr;		/* pointer to the mapped area */
	size_t			length;		/* length of the mapped area */
	size_t			consumed;	/* number of consumed bytes */
	struct sg_page_iter	piter;		/* page iterator */

	/* these are internal states, keep away */
	unsigned int		__offset;	/* offset within page */
	unsigned int		__remaining;	/* remaining bytes on page */
	unsigned int		__flags;
};

/* Begin iteration; pairs with sg_miter_stop() on every exit path. */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags);
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
0634 
0635 #endif /* _LINUX_SCATTERLIST_H */