Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef __LINUX_SWIOTLB_H
0003 #define __LINUX_SWIOTLB_H
0004 
0005 #include <linux/device.h>
0006 #include <linux/dma-direction.h>
0007 #include <linux/init.h>
0008 #include <linux/types.h>
0009 #include <linux/limits.h>
0010 #include <linux/spinlock.h>
0011 
0012 struct device;
0013 struct page;
0014 struct scatterlist;
0015 
0016 #define SWIOTLB_VERBOSE (1 << 0) /* verbose initialization */
0017 #define SWIOTLB_FORCE   (1 << 1) /* force bounce buffering */
0018 #define SWIOTLB_ANY (1 << 2) /* allow any memory for the buffer */
0019 
0020 /*
0021  * Maximum allowable number of contiguous slabs to map,
0022  * must be a power of 2.  What is the appropriate value ?
0023  * The complexity of {map,unmap}_single is linearly dependent on this value.
0024  */
0025 #define IO_TLB_SEGSIZE  128
0026 
0027 /*
0028  * log of the size of each IO TLB slab.  The number of slabs is command line
0029  * controllable.
0030  */
0031 #define IO_TLB_SHIFT 11
0032 #define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
0033 
0034 /* default to 64MB */
0035 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
0036 
0037 unsigned long swiotlb_size_or_default(void);
0038 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
0039     int (*remap)(void *tlb, unsigned long nslabs));
0040 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
0041     int (*remap)(void *tlb, unsigned long nslabs));
0042 extern void __init swiotlb_update_mem_attributes(void);
0043 
0044 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
0045         size_t mapping_size, size_t alloc_size,
0046         unsigned int alloc_aligned_mask, enum dma_data_direction dir,
0047         unsigned long attrs);
0048 
0049 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
0050                      phys_addr_t tlb_addr,
0051                      size_t mapping_size,
0052                      enum dma_data_direction dir,
0053                      unsigned long attrs);
0054 
0055 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
0056         size_t size, enum dma_data_direction dir);
0057 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
0058         size_t size, enum dma_data_direction dir);
0059 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
0060         size_t size, enum dma_data_direction dir, unsigned long attrs);
0061 
0062 #ifdef CONFIG_SWIOTLB
0063 
/**
 * struct io_tlb_mem - IO TLB Memory Pool Descriptor
 *
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The vaddr of the swiotlb memory pool. The swiotlb memory pool
 *		may be remapped in the memory encrypted case and store virtual
 *		address for bounce buffer operation.
 * @nslabs:	The number of IO TLB slots between @start and @end. For the
 *		default swiotlb, this is command line adjustable via
 *		setup_io_tlb_npages.
 * @used:	The number of used IO TLB slots.
 * @debugfs:	The dentry to debugfs.
 * @late_alloc:	%true if allocated using the page allocator.
 * @force_bounce: %true if swiotlb bouncing is forced.
 * @for_alloc:	%true if the pool is used for memory allocation.
 * @nareas:	The number of areas the pool is split into.
 * @area_nslabs: The number of slots in each area.
 * @areas:	Per-area bookkeeping, one entry per area (opaque here;
 *		defined in the swiotlb implementation).
 * @slots:	Per-slot bookkeeping, presumably @nslabs entries (opaque
 *		here; defined in the swiotlb implementation).
 */
struct io_tlb_mem {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	unsigned long used;
	struct dentry *debugfs;
	bool late_alloc;
	bool force_bounce;
	bool for_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
};
0106 extern struct io_tlb_mem io_tlb_default_mem;
0107 
0108 static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
0109 {
0110     struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
0111 
0112     return mem && paddr >= mem->start && paddr < mem->end;
0113 }
0114 
0115 static inline bool is_swiotlb_force_bounce(struct device *dev)
0116 {
0117     struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
0118 
0119     return mem && mem->force_bounce;
0120 }
0121 
0122 void swiotlb_init(bool addressing_limited, unsigned int flags);
0123 void __init swiotlb_exit(void);
0124 unsigned int swiotlb_max_segment(void);
0125 size_t swiotlb_max_mapping_size(struct device *dev);
0126 bool is_swiotlb_active(struct device *dev);
0127 void __init swiotlb_adjust_size(unsigned long size);
0128 #else
/* CONFIG_SWIOTLB=n: there is no bounce buffer pool to set up. */
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}
/* Without a swiotlb pool, no address can be a bounce buffer. */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	return false;
}
/* Without a swiotlb pool, bounce buffering can never be forced. */
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	return false;
}
/* CONFIG_SWIOTLB=n: nothing was initialized, so nothing to tear down. */
static inline void swiotlb_exit(void)
{
}
/* CONFIG_SWIOTLB=n: report 0, i.e. no swiotlb segment limit applies. */
static inline unsigned int swiotlb_max_segment(void)
{
	return 0;
}
/* CONFIG_SWIOTLB=n: no bounce buffer size cap, so mappings are unbounded. */
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}
0151 
/* Without CONFIG_SWIOTLB, swiotlb is never active for any device. */
static inline bool is_swiotlb_active(struct device *dev)
{
	return false;
}
0156 
/* CONFIG_SWIOTLB=n: there is no pool whose size could be adjusted. */
static inline void swiotlb_adjust_size(unsigned long size)
{
}
0160 #endif /* CONFIG_SWIOTLB */
0161 
0162 extern void swiotlb_print_info(void);
0163 
0164 #ifdef CONFIG_DMA_RESTRICTED_POOL
0165 struct page *swiotlb_alloc(struct device *dev, size_t size);
0166 bool swiotlb_free(struct device *dev, struct page *page, size_t size);
0167 
0168 static inline bool is_swiotlb_for_alloc(struct device *dev)
0169 {
0170     return dev->dma_io_tlb_mem->for_alloc;
0171 }
0172 #else
/* No restricted DMA pool configured: allocation from swiotlb always fails. */
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	return NULL;
}
/*
 * No restricted DMA pool configured: report that @page did not come from
 * swiotlb, so the caller frees it through the normal path instead.
 */
static inline bool swiotlb_free(struct device *dev, struct page *page,
				size_t size)
{
	return false;
}
/* No restricted DMA pool configured: no device allocates from swiotlb. */
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return false;
}
0186 #endif /* CONFIG_DMA_RESTRICTED_POOL */
0187 
0188 extern phys_addr_t swiotlb_unencrypted_base;
0189 
0190 #endif /* __LINUX_SWIOTLB_H */