0001
0002
0003
0004
0005
0006
0007 #ifndef __LINUX_IOMMU_H
0008 #define __LINUX_IOMMU_H
0009
0010 #include <linux/scatterlist.h>
0011 #include <linux/device.h>
0012 #include <linux/types.h>
0013 #include <linux/errno.h>
0014 #include <linux/err.h>
0015 #include <linux/of.h>
0016 #include <linux/ioasid.h>
0017 #include <uapi/linux/iommu.h>
0018
/* Page-table permission/attribute flags passed to iommu_map() */
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2)	/* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4)	/* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * memory access flags should be applied to supervisor privilege (with the
 * assumption that address spaces separate supervisor and user).
 */
#define IOMMU_PRIV (1 << 5)
0034
/* Forward declarations; full definitions appear below or in other headers */
struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags (the 'flags' argument of iommu_fault_handler_t) */
#define IOMMU_FAULT_READ 0x0
#define IOMMU_FAULT_WRITE 0x1

/* Domain-level fault handler; installed via iommu_set_fault_handler() */
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
/* Per-device fault handler; see iommu_register_device_fault_handler() */
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
0053
/* Usable IOVA window of a domain */
struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped     */
	dma_addr_t aperture_end;   /* Last address that can be mapped      */
	bool force_aperture;       /* DMA only allowed inside the aperture */
};
0059
0060
/* Feature bits that compose the IOMMU_DOMAIN_* types below */
#define __IOMMU_DOMAIN_PAGING (1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
					    implementation              */
#define __IOMMU_DOMAIN_PT (1U << 2)	 /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ (1U << 3)	 /* DMA-API uses flush queue    */

/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked; can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations
 *	IOMMU_DOMAIN_DMA_FQ	- As IOMMU_DOMAIN_DMA, but definitely using
 *				  batched (flush-queue based) TLB invalidation
 */
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING |	\
			  __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING |	\
			     __IOMMU_DOMAIN_DMA_API |	\
			     __IOMMU_DOMAIN_DMA_FQ)
0089
/* One IOMMU address space (a set of IOVA -> physical mappings) */
struct iommu_domain {
	unsigned type;				/* IOMMU_DOMAIN_* value */
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;		/* Bitmap of supported page sizes */
	iommu_fault_handler_t handler;		/* Set by iommu_set_fault_handler() */
	void *handler_token;			/* Opaque token passed to @handler */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;	/* DMA-API state for DMA domains */
};
0099
0100 static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
0101 {
0102 return domain->type & __IOMMU_DOMAIN_DMA_API;
0103 }
0104
/* Capabilities queried via iommu_capable()/device_iommu_capable() */
enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag is supported */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware enabled DMA protection
					   before the OS took over */

};
0112
0113
/* Types of reserved regions reported via iommu_get_resv_regions() */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};
0148
/* Reserved region described by an ACPI IORT RMR node */
struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with the IORT RMR entry */
	const u32 *sids;
	u32 num_sids;		/* Number of entries in @sids */
};
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
/*
 * Per-device IOMMU features, toggled with iommu_dev_enable_feature() /
 * iommu_dev_disable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
	IOMMU_DEV_FEAT_IOPF,	/* I/O Page Fault reporting */
};
0173
0174 #define IOMMU_PASID_INVALID (-1U)
0175
0176 #ifdef CONFIG_IOMMU_API
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_domain_ops before eventually being
 * passed into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
	unsigned long start;
	unsigned long end;
	size_t pgsize;
	struct list_head freelist;
	bool queued;
};
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init
 * @dev_enable_feat: enable an iommu specific feature for a device
 * @dev_disable_feat: disable an iommu specific feature for a device
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	u32 (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
};
0265
0266
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 * @detach_dev: detach an iommu domain from a device
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing
 *                           IOMMU_CACHE, including no-snoop TLPs on PCIe or
 *                           other platform specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);

	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};
0318
0319
0320
0321
0322
0323
0324
0325
/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu instance
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as a page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};
0346
0347
0348
0349
0350
0351
0352
0353
/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @lock: protects all the fields of this structure
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param: I/O Page Fault queue and data
 * @fwspec: IOMMU fwspec data
 * @iommu_dev: IOMMU device this device is linked to
 * @priv: IOMMU Driver private data
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param *fault_param;
	struct iopf_device_param *iopf_param;
	struct iommu_fwspec *fwspec;
	struct iommu_device *iommu_dev;
	void *priv;
};
0381
0382 int iommu_device_register(struct iommu_device *iommu,
0383 const struct iommu_ops *ops,
0384 struct device *hwdev);
0385 void iommu_device_unregister(struct iommu_device *iommu);
0386 int iommu_device_sysfs_add(struct iommu_device *iommu,
0387 struct device *parent,
0388 const struct attribute_group **groups,
0389 const char *fmt, ...) __printf(4, 5);
0390 void iommu_device_sysfs_remove(struct iommu_device *iommu);
0391 int iommu_device_link(struct iommu_device *iommu, struct device *link);
0392 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
0393 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
0394
/*
 * Retrieve the struct iommu_device previously stored in @dev's driver data
 * (the sysfs device created by iommu_device_sysfs_add()).
 */
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	void *drvdata = dev_get_drvdata(dev);

	return drvdata;
}
0399
0400 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
0401 {
0402 *gather = (struct iommu_iotlb_gather) {
0403 .start = ULONG_MAX,
0404 .freelist = LIST_HEAD_INIT(gather->freelist),
0405 };
0406 }
0407
static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the iommu subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}
0418
0419 extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
0420 extern int bus_iommu_probe(struct bus_type *bus);
0421 extern bool iommu_present(struct bus_type *bus);
0422 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
0423 extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
0424 extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
0425 extern struct iommu_group *iommu_group_get_by_id(int id);
0426 extern void iommu_domain_free(struct iommu_domain *domain);
0427 extern int iommu_attach_device(struct iommu_domain *domain,
0428 struct device *dev);
0429 extern void iommu_detach_device(struct iommu_domain *domain,
0430 struct device *dev);
0431 extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
0432 struct device *dev, ioasid_t pasid);
0433 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
0434 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
0435 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
0436 phys_addr_t paddr, size_t size, int prot);
0437 extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
0438 phys_addr_t paddr, size_t size, int prot);
0439 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
0440 size_t size);
0441 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
0442 unsigned long iova, size_t size,
0443 struct iommu_iotlb_gather *iotlb_gather);
0444 extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
0445 struct scatterlist *sg, unsigned int nents, int prot);
0446 extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
0447 unsigned long iova, struct scatterlist *sg,
0448 unsigned int nents, int prot);
0449 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
0450 extern void iommu_set_fault_handler(struct iommu_domain *domain,
0451 iommu_fault_handler_t handler, void *token);
0452
0453 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
0454 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
0455 extern void iommu_set_default_passthrough(bool cmd_line);
0456 extern void iommu_set_default_translated(bool cmd_line);
0457 extern bool iommu_default_passthrough(void);
0458 extern struct iommu_resv_region *
0459 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
0460 enum iommu_resv_type type);
0461 extern int iommu_get_group_resv_regions(struct iommu_group *group,
0462 struct list_head *head);
0463
0464 extern int iommu_attach_group(struct iommu_domain *domain,
0465 struct iommu_group *group);
0466 extern void iommu_detach_group(struct iommu_domain *domain,
0467 struct iommu_group *group);
0468 extern struct iommu_group *iommu_group_alloc(void);
0469 extern void *iommu_group_get_iommudata(struct iommu_group *group);
0470 extern void iommu_group_set_iommudata(struct iommu_group *group,
0471 void *iommu_data,
0472 void (*release)(void *iommu_data));
0473 extern int iommu_group_set_name(struct iommu_group *group, const char *name);
0474 extern int iommu_group_add_device(struct iommu_group *group,
0475 struct device *dev);
0476 extern void iommu_group_remove_device(struct device *dev);
0477 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
0478 int (*fn)(struct device *, void *));
0479 extern struct iommu_group *iommu_group_get(struct device *dev);
0480 extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
0481 extern void iommu_group_put(struct iommu_group *group);
0482 extern int iommu_register_device_fault_handler(struct device *dev,
0483 iommu_dev_fault_handler_t handler,
0484 void *data);
0485
0486 extern int iommu_unregister_device_fault_handler(struct device *dev);
0487
0488 extern int iommu_report_device_fault(struct device *dev,
0489 struct iommu_fault_event *evt);
0490 extern int iommu_page_response(struct device *dev,
0491 struct iommu_page_response *msg);
0492
0493 extern int iommu_group_id(struct iommu_group *group);
0494 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
0495
0496 int iommu_enable_nesting(struct iommu_domain *domain);
0497 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
0498 unsigned long quirks);
0499
0500 void iommu_set_dma_strict(void);
0501
0502 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
0503 unsigned long iova, int flags);
0504
0505 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
0506 {
0507 if (domain->ops->flush_iotlb_all)
0508 domain->ops->flush_iotlb_all(domain);
0509 }
0510
0511 static inline void iommu_iotlb_sync(struct iommu_domain *domain,
0512 struct iommu_iotlb_gather *iotlb_gather)
0513 {
0514 if (domain->ops->iotlb_sync)
0515 domain->ops->iotlb_sync(domain, iotlb_gather);
0516
0517 iommu_iotlb_gather_init(iotlb_gather);
0518 }
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered
 * range are disjoint. For many IOMMUs, flushing the IOMMU in this case is
 * better than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	/* gather->end == 0 means nothing has been gathered yet */
	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
0590
0591 static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
0592 {
0593 return gather && gather->queued;
0594 }
0595
0596
0597 extern struct iommu_group *pci_device_group(struct device *dev);
0598
0599 extern struct iommu_group *generic_device_group(struct device *dev);
0600
0601 struct iommu_group *fsl_mc_device_group(struct device *dev);
0602
0603
0604
0605
0606
0607
0608
0609
0610
/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops *ops;
	struct fwnode_handle *iommu_fwnode;
	u32 flags;
	unsigned int num_ids;
	u32 ids[];
};
0618
0619
0620 #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
0621
0622
0623
0624
/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: the device bound via iommu_sva_bind_device()
 */
struct iommu_sva {
	struct device *dev;
};
0628
0629 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
0630 const struct iommu_ops *ops);
0631 void iommu_fwspec_free(struct device *dev);
0632 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
0633 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
0634
0635 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
0636 {
0637 if (dev->iommu)
0638 return dev->iommu->fwspec;
0639 else
0640 return NULL;
0641 }
0642
0643 static inline void dev_iommu_fwspec_set(struct device *dev,
0644 struct iommu_fwspec *fwspec)
0645 {
0646 dev->iommu->fwspec = fwspec;
0647 }
0648
0649 static inline void *dev_iommu_priv_get(struct device *dev)
0650 {
0651 if (dev->iommu)
0652 return dev->iommu->priv;
0653 else
0654 return NULL;
0655 }
0656
0657 static inline void dev_iommu_priv_set(struct device *dev, void *priv)
0658 {
0659 dev->iommu->priv = priv;
0660 }
0661
0662 int iommu_probe_device(struct device *dev);
0663 void iommu_release_device(struct device *dev);
0664
0665 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
0666 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
0667
0668 struct iommu_sva *iommu_sva_bind_device(struct device *dev,
0669 struct mm_struct *mm,
0670 void *drvdata);
0671 void iommu_sva_unbind_device(struct iommu_sva *handle);
0672 u32 iommu_sva_get_pasid(struct iommu_sva *handle);
0673
0674 int iommu_device_use_default_domain(struct device *dev);
0675 void iommu_device_unuse_default_domain(struct device *dev);
0676
0677 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
0678 void iommu_group_release_dma_owner(struct iommu_group *group);
0679 bool iommu_group_dma_owner_claimed(struct iommu_group *group);
0680
#else /* CONFIG_IOMMU_API */

/* Empty placeholder types so pointers to them still compile */
struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
0689
0690 static inline bool iommu_present(struct bus_type *bus)
0691 {
0692 return false;
0693 }
0694
0695 static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
0696 {
0697 return false;
0698 }
0699
0700 static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
0701 {
0702 return false;
0703 }
0704
0705 static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
0706 {
0707 return NULL;
0708 }
0709
0710 static inline struct iommu_group *iommu_group_get_by_id(int id)
0711 {
0712 return NULL;
0713 }
0714
0715 static inline void iommu_domain_free(struct iommu_domain *domain)
0716 {
0717 }
0718
0719 static inline int iommu_attach_device(struct iommu_domain *domain,
0720 struct device *dev)
0721 {
0722 return -ENODEV;
0723 }
0724
0725 static inline void iommu_detach_device(struct iommu_domain *domain,
0726 struct device *dev)
0727 {
0728 }
0729
0730 static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
0731 {
0732 return NULL;
0733 }
0734
0735 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
0736 phys_addr_t paddr, size_t size, int prot)
0737 {
0738 return -ENODEV;
0739 }
0740
0741 static inline int iommu_map_atomic(struct iommu_domain *domain,
0742 unsigned long iova, phys_addr_t paddr,
0743 size_t size, int prot)
0744 {
0745 return -ENODEV;
0746 }
0747
0748 static inline size_t iommu_unmap(struct iommu_domain *domain,
0749 unsigned long iova, size_t size)
0750 {
0751 return 0;
0752 }
0753
0754 static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
0755 unsigned long iova, int gfp_order,
0756 struct iommu_iotlb_gather *iotlb_gather)
0757 {
0758 return 0;
0759 }
0760
0761 static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
0762 unsigned long iova, struct scatterlist *sg,
0763 unsigned int nents, int prot)
0764 {
0765 return -ENODEV;
0766 }
0767
0768 static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
0769 unsigned long iova, struct scatterlist *sg,
0770 unsigned int nents, int prot)
0771 {
0772 return -ENODEV;
0773 }
0774
0775 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
0776 {
0777 }
0778
0779 static inline void iommu_iotlb_sync(struct iommu_domain *domain,
0780 struct iommu_iotlb_gather *iotlb_gather)
0781 {
0782 }
0783
0784 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
0785 {
0786 return 0;
0787 }
0788
0789 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
0790 iommu_fault_handler_t handler, void *token)
0791 {
0792 }
0793
0794 static inline void iommu_get_resv_regions(struct device *dev,
0795 struct list_head *list)
0796 {
0797 }
0798
0799 static inline void iommu_put_resv_regions(struct device *dev,
0800 struct list_head *list)
0801 {
0802 }
0803
0804 static inline int iommu_get_group_resv_regions(struct iommu_group *group,
0805 struct list_head *head)
0806 {
0807 return -ENODEV;
0808 }
0809
0810 static inline void iommu_set_default_passthrough(bool cmd_line)
0811 {
0812 }
0813
0814 static inline void iommu_set_default_translated(bool cmd_line)
0815 {
0816 }
0817
0818 static inline bool iommu_default_passthrough(void)
0819 {
0820 return true;
0821 }
0822
0823 static inline int iommu_attach_group(struct iommu_domain *domain,
0824 struct iommu_group *group)
0825 {
0826 return -ENODEV;
0827 }
0828
0829 static inline void iommu_detach_group(struct iommu_domain *domain,
0830 struct iommu_group *group)
0831 {
0832 }
0833
0834 static inline struct iommu_group *iommu_group_alloc(void)
0835 {
0836 return ERR_PTR(-ENODEV);
0837 }
0838
0839 static inline void *iommu_group_get_iommudata(struct iommu_group *group)
0840 {
0841 return NULL;
0842 }
0843
0844 static inline void iommu_group_set_iommudata(struct iommu_group *group,
0845 void *iommu_data,
0846 void (*release)(void *iommu_data))
0847 {
0848 }
0849
0850 static inline int iommu_group_set_name(struct iommu_group *group,
0851 const char *name)
0852 {
0853 return -ENODEV;
0854 }
0855
0856 static inline int iommu_group_add_device(struct iommu_group *group,
0857 struct device *dev)
0858 {
0859 return -ENODEV;
0860 }
0861
0862 static inline void iommu_group_remove_device(struct device *dev)
0863 {
0864 }
0865
0866 static inline int iommu_group_for_each_dev(struct iommu_group *group,
0867 void *data,
0868 int (*fn)(struct device *, void *))
0869 {
0870 return -ENODEV;
0871 }
0872
0873 static inline struct iommu_group *iommu_group_get(struct device *dev)
0874 {
0875 return NULL;
0876 }
0877
0878 static inline void iommu_group_put(struct iommu_group *group)
0879 {
0880 }
0881
0882 static inline
0883 int iommu_register_device_fault_handler(struct device *dev,
0884 iommu_dev_fault_handler_t handler,
0885 void *data)
0886 {
0887 return -ENODEV;
0888 }
0889
0890 static inline int iommu_unregister_device_fault_handler(struct device *dev)
0891 {
0892 return 0;
0893 }
0894
0895 static inline
0896 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
0897 {
0898 return -ENODEV;
0899 }
0900
0901 static inline int iommu_page_response(struct device *dev,
0902 struct iommu_page_response *msg)
0903 {
0904 return -ENODEV;
0905 }
0906
0907 static inline int iommu_group_id(struct iommu_group *group)
0908 {
0909 return -ENODEV;
0910 }
0911
0912 static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
0913 unsigned long quirks)
0914 {
0915 return 0;
0916 }
0917
0918 static inline int iommu_device_register(struct iommu_device *iommu,
0919 const struct iommu_ops *ops,
0920 struct device *hwdev)
0921 {
0922 return -ENODEV;
0923 }
0924
0925 static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
0926 {
0927 return NULL;
0928 }
0929
0930 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
0931 {
0932 }
0933
0934 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
0935 struct iommu_iotlb_gather *gather,
0936 unsigned long iova, size_t size)
0937 {
0938 }
0939
0940 static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
0941 {
0942 return false;
0943 }
0944
0945 static inline void iommu_device_unregister(struct iommu_device *iommu)
0946 {
0947 }
0948
0949 static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
0950 struct device *parent,
0951 const struct attribute_group **groups,
0952 const char *fmt, ...)
0953 {
0954 return -ENODEV;
0955 }
0956
0957 static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
0958 {
0959 }
0960
0961 static inline int iommu_device_link(struct device *dev, struct device *link)
0962 {
0963 return -EINVAL;
0964 }
0965
/*
 * Stub for CONFIG_IOMMU_API=n; parameter types match the real
 * iommu_device_unlink() prototype.
 */
static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}
0969
0970 static inline int iommu_fwspec_init(struct device *dev,
0971 struct fwnode_handle *iommu_fwnode,
0972 const struct iommu_ops *ops)
0973 {
0974 return -ENODEV;
0975 }
0976
0977 static inline void iommu_fwspec_free(struct device *dev)
0978 {
0979 }
0980
0981 static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
0982 int num_ids)
0983 {
0984 return -ENODEV;
0985 }
0986
0987 static inline
0988 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
0989 {
0990 return NULL;
0991 }
0992
0993 static inline int
0994 iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
0995 {
0996 return -ENODEV;
0997 }
0998
0999 static inline int
1000 iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
1001 {
1002 return -ENODEV;
1003 }
1004
1005 static inline struct iommu_sva *
1006 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
1007 {
1008 return NULL;
1009 }
1010
1011 static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
1012 {
1013 }
1014
1015 static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
1016 {
1017 return IOMMU_PASID_INVALID;
1018 }
1019
1020 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
1021 {
1022 return NULL;
1023 }
1024
1025 static inline int iommu_device_use_default_domain(struct device *dev)
1026 {
1027 return 0;
1028 }
1029
1030 static inline void iommu_device_unuse_default_domain(struct device *dev)
1031 {
1032 }
1033
1034 static inline int
1035 iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
1036 {
1037 return -ENODEV;
1038 }
1039
1040 static inline void iommu_group_release_dma_owner(struct iommu_group *group)
1041 {
1042 }
1043
1044 static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
1045 {
1046 return false;
1047 }
1048 #endif
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060 static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
1061 unsigned long iova, struct sg_table *sgt, int prot)
1062 {
1063 return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
1064 }
1065
#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;	/* root of iommu debugfs tree */
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif
1072
1073 #endif