/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>

struct device;
struct page;
struct scatterlist;

#define SWIOTLB_VERBOSE (1 << 0)	/* verbose initialization */
#define SWIOTLB_FORCE   (1 << 1)	/* force bounce buffering */
#define SWIOTLB_ANY     (1 << 2)	/* allow any memory for the buffer */
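
/*
 * Maximum allowable number of contiguous slabs to map; must be a power of
 * two.  The complexity of {map,unmap}_single is linearly dependent on this
 * value.
 */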
#define IO_TLB_SEGSIZE	128
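
/*
 * Log2 of the size of each IO TLB slab.  The number of slabs is command
 * line controllable.
 */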
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
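
/* default to 64MB */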
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
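
/*
 * Set-up and sizing of the default SWIOTLB pool.  The optional @remap
 * callback gives the caller a chance to remap the bounce buffer (e.g. Xen)
 * before it is put into use.
 */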
unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);
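
/*
 * Core bounce buffering primitives used by the DMA API: map a physical
 * range into the IO TLB pool, sync data between the original buffer and
 * its bounce copy, and unmap/free the bounce slot again.
 */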
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
		unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#ifdef CONFIG_SWIOTLB
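
/**
 * struct io_tlb_mem - software IO TLB (bounce buffer) pool descriptor
 * @start:	Physical start address of the pool.
 * @end:	Physical end address of the pool.
 * @vaddr:	Virtual address of the pool; may differ from phys_to_virt(@start)
 *		when the buffer has been remapped (e.g. for memory encryption).
 * @nslabs:	Total number of IO_TLB_SIZE slots in the pool.
 * @used:	Number of slots currently in use.
 * @debugfs:	debugfs entry for this pool.
 * @late_alloc:	%true if the pool was allocated late from the page allocator.
 * @force_bounce: %true if every mapping through this pool must be bounced.
 * @for_alloc:	%true if the pool also backs coherent allocations (restricted
 *		DMA pools).
 * @nareas:	Number of areas the pool is split into to reduce lock contention.
 * @area_nslabs: Number of slots per area.
 * @areas:	Per-area allocation state.
 * @slots:	Per-slot bookkeeping (original address, allocation size).
 */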
struct io_tlb_mem {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	unsigned long used;
	struct dentry *debugfs;
	bool late_alloc;
	bool force_bounce;
	bool for_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
};
extern struct io_tlb_mem io_tlb_default_mem;

/* Does @paddr point into the bounce buffer pool used by @dev? */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && paddr >= mem->start && paddr < mem->end;
}

/* Must all DMA to and from @dev be bounced through swiotlb? */
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->force_bounce;
}

void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	return false;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	return false;
}
static inline void swiotlb_exit(void)
{
}
static inline unsigned int swiotlb_max_segment(void)
{
	return 0;
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}

static inline bool is_swiotlb_active(struct device *dev)
{
	return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);

#ifdef CONFIG_DMA_RESTRICTED_POOL
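/* Allocate/free pages straight from @dev's restricted DMA pool. */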
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return dev->dma_io_tlb_mem->for_alloc;
}
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
				size_t size)
{
	return false;
}
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */

/* Set by platforms that must access the SWIOTLB pool via an unencrypted mapping. */
extern phys_addr_t swiotlb_unencrypted_base;

#endif /* __LINUX_SWIOTLB_H */