0001
0002
0003
0004
0005
0006
0007
0008 #ifndef _ASM_IOMMU_H
0009 #define _ASM_IOMMU_H
0010 #ifdef __KERNEL__
0011
0012 #include <linux/compiler.h>
0013 #include <linux/spinlock.h>
0014 #include <linux/device.h>
0015 #include <linux/dma-map-ops.h>
0016 #include <linux/bitops.h>
0017 #include <asm/machdep.h>
0018 #include <asm/types.h>
0019 #include <asm/pci-bridge.h>
0020 #include <asm/asm-const.h>
0021
/* Fixed 4K IOMMU page geometry, independent of the kernel page size */
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
/* NOTE(review): built from a plain int 1 (not ASM_CONST), so the 64-bit
 * mask comes from sign-extension of ~0xfff — presumably intentional,
 * confirm before changing. */
#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)

/* Per-table geometry, derived from the table's own it_page_shift */
#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
0030
0031
0032 extern int iommu_is_off;
0033 extern int iommu_force_on;
0034
/* Backend callbacks implementing TCE (translation control entry)
 * manipulation for one iommu_table. */
struct iommu_table_ops {
	/*
	 * Install @npages entries starting at @index, mapping the linear
	 * kernel address @uaddr with the given DMA @direction and @attrs.
	 * NOTE(review): presumably equivalent to clear() when
	 * direction == DMA_NONE — confirm against the implementations.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchange the entry at @index with a new one without flushing
	 * the hardware TCE cache: @hpa and @direction carry the new
	 * values in and return the old ones. Callers are expected to
	 * follow up with tce_kill() to invalidate stale translations.
	 */
	int (*xchg_no_kill)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);

	/* Invalidate @pages cached translations starting at @index */
	void (*tce_kill)(struct iommu_table *tbl,
			unsigned long index,
			unsigned long pages);

	/* Return a pointer to the userspace-view entry for @index;
	 * when @alloc, allocate backing storage on demand (see the
	 * IOMMU_TABLE_USERSPACE_ENTRY{,_RO} wrappers below). */
	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	/* Clear @npages entries starting at @index */
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* Read back the raw entry at @index */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	/* Push any pending updates to the hardware */
	void (*flush)(struct iommu_table *tbl);
	/* Release backend resources owned by @tbl */
	void (*free)(struct iommu_table *tbl);
};
0069
0070
0071 extern struct iommu_table_ops iommu_table_lpar_multi_ops;
0072 extern struct iommu_table_ops iommu_table_pseries_ops;
0073
0074
0075
0076
0077
0078
/* Largest contiguous allocation supported: 2^13 IOMMU pages */
#define IOMAP_MAX_ORDER 13

/* Number of allocation pools per table; presumably indexed by a hash
 * (e.g. of the CPU) to spread lock contention — see the allocator. */
#define IOMMU_POOL_HASHBITS 2
#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
0083
/* One sub-range of an iommu_table's allocation bitmap, with its own
 * lock so allocations in different pools do not contend. */
struct iommu_pool {
	unsigned long start;	/* first entry belonging to this pool */
	unsigned long end;	/* presumably one past the last entry — confirm */
	unsigned long hint;	/* search hint for the next allocation */
	spinlock_t lock;	/* protects allocations within this pool */
} ____cacheline_aligned_in_smp;
0090
/* One TCE table: hardware geometry plus the software allocator state
 * (bitmap split into locked pools). */
struct iommu_table {
	unsigned long it_busno;		/* bus number this table belongs to */
	unsigned long it_size;		/* size of table in entries */
	unsigned long it_indirect_levels; /* levels of indirect TCE tables */
	unsigned long it_level_size;	/* entries per indirect level */
	unsigned long it_allocated_size;
	unsigned long it_offset;	/* offset into global table */
	unsigned long it_base;		/* mapped address of the TCE table */
	unsigned long it_index;		/* which iommu table this is */
	unsigned long it_type;		/* NOTE(review): values not visible here */
	unsigned long it_blocksize;	/* entries in each block (cacheline) */
	unsigned long poolsize;		/* entries per pool */
	unsigned long nr_pools;		/* pools actually in use (<= IOMMU_NR_POOLS) */
	struct iommu_pool large_pool;	/* pool reserved for large allocations */
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;		/* allocation bitmap, one bit per entry */
	unsigned long it_page_shift;	/* table granularity (log2 page size) */
	struct list_head it_group_list;	/* list of iommu_table_group_link */
	__be64 *it_userspace;		/* userspace view of the table */
	struct iommu_table_ops *it_ops;	/* backend callbacks */
	struct kref it_kref;		/* refcount; see iommu_tce_table_get/put */
	int it_nid;			/* NUMA node of the table */
	unsigned long it_reserved_start; /* start of reserved (unusable) range */
	unsigned long it_reserved_end;
};
0116
/* Accessors for the userspace view of a table entry.  The _RO variant
 * never allocates backing storage (alloc=false, may return NULL); the
 * plain variant allocates on demand (alloc=true). */
#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
	((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
	((tbl)->it_ops->useraddrptr((tbl), (entry), true))
0121
0122
0123 static inline __attribute_const__
0124 int get_iommu_order(unsigned long size, struct iommu_table *tbl)
0125 {
0126 return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
0127 }
0128
0129
0130 struct scatterlist;
0131
0132 #ifdef CONFIG_PPC64
0133
/* Record the iommu_table serving DMA for @dev in its archdata */
static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}
0139
/* Fetch the iommu_table previously set for @dev (may be NULL) */
static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}
0144
0145 extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
0146
0147 extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
0148 extern int iommu_tce_table_put(struct iommu_table *tbl);
0149
0150
0151
0152
0153 extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
0154 int nid, unsigned long res_start, unsigned long res_end);
0155 bool iommu_table_in_use(struct iommu_table *tbl);
0156
0157 #define IOMMU_TABLE_GROUP_MAX_TABLES 2
0158
0159 struct iommu_table_group;
0160
/* Platform callbacks for managing a group's DMA windows, including
 * handover between kernel and external (e.g. VFIO) ownership. */
struct iommu_table_group_ops {
	/* Bytes of metadata needed for a window of the given geometry */
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	/* Allocate table #num with the given geometry into *@ptbl;
	 * returns 0 or a negative error code */
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	/* Program @tblnew into hardware window slot #num */
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	/* Tear down hardware window slot #num */
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Transfer ownership of the group's DMA setup away from the kernel */
	void (*take_ownership)(struct iommu_table_group *table_group);
	/* Restore kernel ownership of the group's DMA setup */
	void (*release_ownership)(struct iommu_table_group *table_group);
};
0182
/* Node linking an iommu_table to one of the groups sharing it;
 * lives on iommu_table::it_group_list, freed via RCU. */
struct iommu_table_group_link {
	struct list_head next;		/* entry in it_group_list */
	struct rcu_head rcu;		/* deferred free */
	struct iommu_table_group *table_group;
};
0188
/* A set of DMA windows sharing ownership, with the properties
 * advertised for creating/attaching windows. */
struct iommu_table_group {
	/* Window capabilities; presumably exposed to userspace via the
	 * IOMMU API — confirm against the VFIO SPAPR driver. */
	__u32 tce32_start;	/* start of the 32-bit DMA window */
	__u32 tce32_size;	/* size of the 32-bit DMA window */
	__u64 pgsizes;		/* bitmap of supported IOMMU page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;	/* max indirect levels for created tables */

	struct iommu_group *group;	/* generic IOMMU core group */
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;
};
0201
0202 #ifdef CONFIG_IOMMU_API
0203
0204 extern void iommu_register_group(struct iommu_table_group *table_group,
0205 int pci_domain_number, unsigned long pe_num);
0206 extern int iommu_add_device(struct iommu_table_group *table_group,
0207 struct device *dev);
0208 extern void iommu_del_device(struct device *dev);
0209 extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
0210 unsigned long entry, unsigned long *hpa,
0211 enum dma_data_direction *direction);
0212 extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
0213 struct iommu_table *tbl,
0214 unsigned long entry, unsigned long *hpa,
0215 enum dma_data_direction *direction);
0216 extern void iommu_tce_kill(struct iommu_table *tbl,
0217 unsigned long entry, unsigned long pages);
0218 #else
/* !CONFIG_IOMMU_API: group registration is a no-op */
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}
0224
/* !CONFIG_IOMMU_API: nothing to attach; report success */
static inline int iommu_add_device(struct iommu_table_group *table_group,
					struct device *dev)
{
	return 0;
}
0230
/* !CONFIG_IOMMU_API: device removal is a no-op */
static inline void iommu_del_device(struct device *dev)
{
}
0234 #endif
0235
0236 u64 dma_iommu_get_required_mask(struct device *dev);
0237 #else
0238
/* !CONFIG_PPC64: there is no per-device IOMMU table */
static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}
0243
/* !CONFIG_PPC64: IOMMU-backed DMA is never supported */
static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
0248
0249 #endif
0250
0251 extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
0252 struct scatterlist *sglist, int nelems,
0253 unsigned long mask,
0254 enum dma_data_direction direction,
0255 unsigned long attrs);
0256 extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
0257 struct scatterlist *sglist,
0258 int nelems,
0259 enum dma_data_direction direction,
0260 unsigned long attrs);
0261
0262 extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
0263 size_t size, dma_addr_t *dma_handle,
0264 unsigned long mask, gfp_t flag, int node);
0265 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
0266 void *vaddr, dma_addr_t dma_handle);
0267 extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
0268 struct page *page, unsigned long offset,
0269 size_t size, unsigned long mask,
0270 enum dma_data_direction direction,
0271 unsigned long attrs);
0272 extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
0273 size_t size, enum dma_data_direction direction,
0274 unsigned long attrs);
0275
0276 void __init iommu_init_early_pSeries(void);
0277 extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
0278 extern void iommu_init_early_pasemi(void);
0279
0280 #if defined(CONFIG_PPC64) && defined(CONFIG_PM)
/* Restore IOMMU state on resume, when the platform provides a hook */
static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
0286 #endif
0287
0288
0289 extern int iommu_tce_check_ioba(unsigned long page_shift,
0290 unsigned long offset, unsigned long size,
0291 unsigned long ioba, unsigned long npages);
0292 extern int iommu_tce_check_gpa(unsigned long page_shift,
0293 unsigned long gpa);
0294
/* Validate a TCE clear request: @ioba/@npages must fall inside the
 * table's window, and @tce_value must be zero; non-zero means invalid. */
#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
	(iommu_tce_check_ioba((tbl)->it_page_shift, \
			(tbl)->it_offset, (tbl)->it_size, \
			(ioba), (npages)) || (tce_value))
/* Validate a single-page TCE put: @ioba inside the window, and @gpa
 * acceptable per iommu_tce_check_gpa (presumably alignment — confirm). */
#define iommu_tce_put_param_check(tbl, ioba, gpa) \
	(iommu_tce_check_ioba((tbl)->it_page_shift, \
			(tbl)->it_offset, (tbl)->it_size, \
			(ioba), 1) || \
	iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
0304
0305 extern void iommu_flush_tce(struct iommu_table *tbl);
0306 extern int iommu_take_ownership(struct iommu_table *tbl);
0307 extern void iommu_release_ownership(struct iommu_table *tbl);
0308
0309 extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
0310 extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);
0311
0312 #ifdef CONFIG_PPC_CELL_NATIVE
0313 extern bool iommu_fixed_is_weak;
0314 #else
0315 #define iommu_fixed_is_weak false
0316 #endif
0317
0318 extern const struct dma_map_ops dma_iommu_ops;
0319
0320 #endif
0321 #endif