/* Navigation chrome from the code-browser export removed; header begins below. */

0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright © 2006-2015, Intel Corporation.
0004  *
0005  * Authors: Ashok Raj <ashok.raj@intel.com>
0006  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
0007  *          David Woodhouse <David.Woodhouse@intel.com>
0008  */
0009 
0010 #ifndef _INTEL_IOMMU_H_
0011 #define _INTEL_IOMMU_H_
0012 
0013 #include <linux/types.h>
0014 #include <linux/iova.h>
0015 #include <linux/io.h>
0016 #include <linux/idr.h>
0017 #include <linux/mmu_notifier.h>
0018 #include <linux/list.h>
0019 #include <linux/iommu.h>
0020 #include <linux/io-64-nonatomic-lo-hi.h>
0021 #include <linux/dmar.h>
0022 #include <linux/ioasid.h>
0023 #include <linux/bitfield.h>
0024 #include <linux/xarray.h>
0025 
0026 #include <asm/cacheflush.h>
0027 #include <asm/iommu.h>
0028 
0029 /*
0030  * VT-d hardware uses 4KiB page size regardless of host page size.
0031  */
0032 #define VTD_PAGE_SHIFT      (12)
0033 #define VTD_PAGE_SIZE       (1UL << VTD_PAGE_SHIFT)
0034 #define VTD_PAGE_MASK       (((u64)-1) << VTD_PAGE_SHIFT)
0035 #define VTD_PAGE_ALIGN(addr)    (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
0036 
0037 #define VTD_STRIDE_SHIFT        (9)
0038 #define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)
0039 
0040 #define DMA_PTE_READ        BIT_ULL(0)
0041 #define DMA_PTE_WRITE       BIT_ULL(1)
0042 #define DMA_PTE_LARGE_PAGE  BIT_ULL(7)
0043 #define DMA_PTE_SNP     BIT_ULL(11)
0044 
0045 #define DMA_FL_PTE_PRESENT  BIT_ULL(0)
0046 #define DMA_FL_PTE_US       BIT_ULL(2)
0047 #define DMA_FL_PTE_ACCESS   BIT_ULL(5)
0048 #define DMA_FL_PTE_DIRTY    BIT_ULL(6)
0049 #define DMA_FL_PTE_XD       BIT_ULL(63)
0050 
0051 #define ADDR_WIDTH_5LEVEL   (57)
0052 #define ADDR_WIDTH_4LEVEL   (48)
0053 
0054 #define CONTEXT_TT_MULTI_LEVEL  0
0055 #define CONTEXT_TT_DEV_IOTLB    1
0056 #define CONTEXT_TT_PASS_THROUGH 2
0057 #define CONTEXT_PASIDE      BIT_ULL(3)
0058 
0059 /*
0060  * Intel IOMMU register specification per version 1.0 public spec.
0061  */
0062 #define DMAR_VER_REG    0x0 /* Arch version supported by this IOMMU */
0063 #define DMAR_CAP_REG    0x8 /* Hardware supported capabilities */
0064 #define DMAR_ECAP_REG   0x10    /* Extended capabilities supported */
0065 #define DMAR_GCMD_REG   0x18    /* Global command register */
0066 #define DMAR_GSTS_REG   0x1c    /* Global status register */
0067 #define DMAR_RTADDR_REG 0x20    /* Root entry table */
0068 #define DMAR_CCMD_REG   0x28    /* Context command reg */
0069 #define DMAR_FSTS_REG   0x34    /* Fault Status register */
0070 #define DMAR_FECTL_REG  0x38    /* Fault control register */
0071 #define DMAR_FEDATA_REG 0x3c    /* Fault event interrupt data register */
0072 #define DMAR_FEADDR_REG 0x40    /* Fault event interrupt addr register */
0073 #define DMAR_FEUADDR_REG 0x44   /* Upper address register */
0074 #define DMAR_AFLOG_REG  0x58    /* Advanced Fault control */
0075 #define DMAR_PMEN_REG   0x64    /* Enable Protected Memory Region */
0076 #define DMAR_PLMBASE_REG 0x68   /* PMRR Low addr */
0077 #define DMAR_PLMLIMIT_REG 0x6c  /* PMRR low limit */
0078 #define DMAR_PHMBASE_REG 0x70   /* pmrr high base addr */
0079 #define DMAR_PHMLIMIT_REG 0x78  /* pmrr high limit */
0080 #define DMAR_IQH_REG    0x80    /* Invalidation queue head register */
0081 #define DMAR_IQT_REG    0x88    /* Invalidation queue tail register */
0082 #define DMAR_IQ_SHIFT   4   /* Invalidation queue head/tail shift */
0083 #define DMAR_IQA_REG    0x90    /* Invalidation queue addr register */
0084 #define DMAR_ICS_REG    0x9c    /* Invalidation complete status register */
0085 #define DMAR_IQER_REG   0xb0    /* Invalidation queue error record register */
0086 #define DMAR_IRTA_REG   0xb8    /* Interrupt remapping table addr register */
0087 #define DMAR_PQH_REG    0xc0    /* Page request queue head register */
0088 #define DMAR_PQT_REG    0xc8    /* Page request queue tail register */
0089 #define DMAR_PQA_REG    0xd0    /* Page request queue address register */
0090 #define DMAR_PRS_REG    0xdc    /* Page request status register */
0091 #define DMAR_PECTL_REG  0xe0    /* Page request event control register */
0092 #define DMAR_PEDATA_REG 0xe4    /* Page request event interrupt data register */
0093 #define DMAR_PEADDR_REG 0xe8    /* Page request event interrupt addr register */
0094 #define DMAR_PEUADDR_REG 0xec   /* Page request event Upper address register */
0095 #define DMAR_MTRRCAP_REG 0x100  /* MTRR capability register */
0096 #define DMAR_MTRRDEF_REG 0x108  /* MTRR default type register */
0097 #define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
0098 #define DMAR_MTRR_FIX16K_80000_REG 0x128
0099 #define DMAR_MTRR_FIX16K_A0000_REG 0x130
0100 #define DMAR_MTRR_FIX4K_C0000_REG 0x138
0101 #define DMAR_MTRR_FIX4K_C8000_REG 0x140
0102 #define DMAR_MTRR_FIX4K_D0000_REG 0x148
0103 #define DMAR_MTRR_FIX4K_D8000_REG 0x150
0104 #define DMAR_MTRR_FIX4K_E0000_REG 0x158
0105 #define DMAR_MTRR_FIX4K_E8000_REG 0x160
0106 #define DMAR_MTRR_FIX4K_F0000_REG 0x168
0107 #define DMAR_MTRR_FIX4K_F8000_REG 0x170
0108 #define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
0109 #define DMAR_MTRR_PHYSMASK0_REG 0x188
0110 #define DMAR_MTRR_PHYSBASE1_REG 0x190
0111 #define DMAR_MTRR_PHYSMASK1_REG 0x198
0112 #define DMAR_MTRR_PHYSBASE2_REG 0x1a0
0113 #define DMAR_MTRR_PHYSMASK2_REG 0x1a8
0114 #define DMAR_MTRR_PHYSBASE3_REG 0x1b0
0115 #define DMAR_MTRR_PHYSMASK3_REG 0x1b8
0116 #define DMAR_MTRR_PHYSBASE4_REG 0x1c0
0117 #define DMAR_MTRR_PHYSMASK4_REG 0x1c8
0118 #define DMAR_MTRR_PHYSBASE5_REG 0x1d0
0119 #define DMAR_MTRR_PHYSMASK5_REG 0x1d8
0120 #define DMAR_MTRR_PHYSBASE6_REG 0x1e0
0121 #define DMAR_MTRR_PHYSMASK6_REG 0x1e8
0122 #define DMAR_MTRR_PHYSBASE7_REG 0x1f0
0123 #define DMAR_MTRR_PHYSMASK7_REG 0x1f8
0124 #define DMAR_MTRR_PHYSBASE8_REG 0x200
0125 #define DMAR_MTRR_PHYSMASK8_REG 0x208
0126 #define DMAR_MTRR_PHYSBASE9_REG 0x210
0127 #define DMAR_MTRR_PHYSMASK9_REG 0x218
0128 #define DMAR_VCCAP_REG      0xe30 /* Virtual command capability register */
0129 #define DMAR_VCMD_REG       0xe00 /* Virtual command register */
0130 #define DMAR_VCRSP_REG      0xe10 /* Virtual command response register */
0131 
0132 #define DMAR_IQER_REG_IQEI(reg)     FIELD_GET(GENMASK_ULL(3, 0), reg)
0133 #define DMAR_IQER_REG_ITESID(reg)   FIELD_GET(GENMASK_ULL(47, 32), reg)
0134 #define DMAR_IQER_REG_ICESID(reg)   FIELD_GET(GENMASK_ULL(63, 48), reg)
0135 
0136 #define OFFSET_STRIDE       (9)
0137 
0138 #define dmar_readq(a) readq(a)
0139 #define dmar_writeq(a,v) writeq(v,a)
0140 #define dmar_readl(a) readl(a)
0141 #define dmar_writel(a, v) writel(v, a)
0142 
0143 #define DMAR_VER_MAJOR(v)       (((v) & 0xf0) >> 4)
0144 #define DMAR_VER_MINOR(v)       ((v) & 0x0f)
0145 
0146 /*
0147  * Decoding Capability Register
0148  */
0149 #define cap_5lp_support(c)  (((c) >> 60) & 1)
0150 #define cap_pi_support(c)   (((c) >> 59) & 1)
0151 #define cap_fl1gp_support(c)    (((c) >> 56) & 1)
0152 #define cap_read_drain(c)   (((c) >> 55) & 1)
0153 #define cap_write_drain(c)  (((c) >> 54) & 1)
0154 #define cap_max_amask_val(c)    (((c) >> 48) & 0x3f)
0155 #define cap_num_fault_regs(c)   ((((c) >> 40) & 0xff) + 1)
0156 #define cap_pgsel_inv(c)    (((c) >> 39) & 1)
0157 
0158 #define cap_super_page_val(c)   (((c) >> 34) & 0xf)
0159 #define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
0160                     * OFFSET_STRIDE) + 21)
0161 
0162 #define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
0163 #define cap_max_fault_reg_offset(c) \
0164     (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
0165 
0166 #define cap_zlr(c)      (((c) >> 22) & 1)
0167 #define cap_isoch(c)        (((c) >> 23) & 1)
0168 #define cap_mgaw(c)     ((((c) >> 16) & 0x3f) + 1)
0169 #define cap_sagaw(c)        (((c) >> 8) & 0x1f)
0170 #define cap_caching_mode(c) (((c) >> 7) & 1)
0171 #define cap_phmr(c)     (((c) >> 6) & 1)
0172 #define cap_plmr(c)     (((c) >> 5) & 1)
0173 #define cap_rwbf(c)     (((c) >> 4) & 1)
0174 #define cap_afl(c)      (((c) >> 3) & 1)
0175 #define cap_ndoms(c)        (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
0176 /*
0177  * Extended Capability Register
0178  */
0179 
0180 #define ecap_rps(e)     (((e) >> 49) & 0x1)
0181 #define ecap_smpwc(e)       (((e) >> 48) & 0x1)
0182 #define ecap_flts(e)        (((e) >> 47) & 0x1)
0183 #define ecap_slts(e)        (((e) >> 46) & 0x1)
0184 #define ecap_slads(e)       (((e) >> 45) & 0x1)
0185 #define ecap_vcs(e)     (((e) >> 44) & 0x1)
0186 #define ecap_smts(e)        (((e) >> 43) & 0x1)
0187 #define ecap_dit(e)     (((e) >> 41) & 0x1)
0188 #define ecap_pds(e)     (((e) >> 42) & 0x1)
0189 #define ecap_pasid(e)       (((e) >> 40) & 0x1)
0190 #define ecap_pss(e)     (((e) >> 35) & 0x1f)
0191 #define ecap_eafs(e)        (((e) >> 34) & 0x1)
0192 #define ecap_nwfs(e)        (((e) >> 33) & 0x1)
0193 #define ecap_srs(e)     (((e) >> 31) & 0x1)
0194 #define ecap_ers(e)     (((e) >> 30) & 0x1)
0195 #define ecap_prs(e)     (((e) >> 29) & 0x1)
0196 #define ecap_broken_pasid(e)    (((e) >> 28) & 0x1)
0197 #define ecap_dis(e)     (((e) >> 27) & 0x1)
0198 #define ecap_nest(e)        (((e) >> 26) & 0x1)
0199 #define ecap_mts(e)     (((e) >> 25) & 0x1)
0200 #define ecap_iotlb_offset(e)    ((((e) >> 8) & 0x3ff) * 16)
0201 #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
0202 #define ecap_coherent(e)    ((e) & 0x1)
0203 #define ecap_qis(e)     ((e) & 0x2)
0204 #define ecap_pass_through(e)    (((e) >> 6) & 0x1)
0205 #define ecap_eim_support(e) (((e) >> 4) & 0x1)
0206 #define ecap_ir_support(e)  (((e) >> 3) & 0x1)
0207 #define ecap_dev_iotlb_support(e)   (((e) >> 2) & 0x1)
0208 #define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
0209 #define ecap_sc_support(e)  (((e) >> 7) & 0x1) /* Snooping Control */
0210 
0211 /* Virtual command interface capability */
0212 #define vccap_pasid(v)      (((v) & DMA_VCS_PAS)) /* PASID allocation */
0213 
0214 /* IOTLB_REG */
0215 #define DMA_TLB_FLUSH_GRANU_OFFSET  60
0216 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
0217 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
0218 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
0219 #define DMA_TLB_IIRG(type) ((type >> 60) & 3)
0220 #define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
0221 #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
0222 #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
0223 #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
0224 #define DMA_TLB_IVT (((u64)1) << 63)
0225 #define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
0226 #define DMA_TLB_MAX_SIZE (0x3f)
0227 
0228 /* INVALID_DESC */
0229 #define DMA_CCMD_INVL_GRANU_OFFSET  61
0230 #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
0231 #define DMA_ID_TLB_DSI_FLUSH    (((u64)2) << 4)
0232 #define DMA_ID_TLB_PSI_FLUSH    (((u64)3) << 4)
0233 #define DMA_ID_TLB_READ_DRAIN   (((u64)1) << 7)
0234 #define DMA_ID_TLB_WRITE_DRAIN  (((u64)1) << 6)
0235 #define DMA_ID_TLB_DID(id)  (((u64)((id & 0xffff) << 16)))
0236 #define DMA_ID_TLB_IH_NONLEAF   (((u64)1) << 6)
0237 #define DMA_ID_TLB_ADDR(addr)   (addr)
0238 #define DMA_ID_TLB_ADDR_MASK(mask)  (mask)
0239 
0240 /* PMEN_REG */
0241 #define DMA_PMEN_EPM (((u32)1)<<31)
0242 #define DMA_PMEN_PRS (((u32)1)<<0)
0243 
0244 /* GCMD_REG */
0245 #define DMA_GCMD_TE (((u32)1) << 31)
0246 #define DMA_GCMD_SRTP (((u32)1) << 30)
0247 #define DMA_GCMD_SFL (((u32)1) << 29)
0248 #define DMA_GCMD_EAFL (((u32)1) << 28)
0249 #define DMA_GCMD_WBF (((u32)1) << 27)
0250 #define DMA_GCMD_QIE (((u32)1) << 26)
0251 #define DMA_GCMD_SIRTP (((u32)1) << 24)
0252 #define DMA_GCMD_IRE (((u32) 1) << 25)
0253 #define DMA_GCMD_CFI (((u32) 1) << 23)
0254 
0255 /* GSTS_REG */
0256 #define DMA_GSTS_TES (((u32)1) << 31)
0257 #define DMA_GSTS_RTPS (((u32)1) << 30)
0258 #define DMA_GSTS_FLS (((u32)1) << 29)
0259 #define DMA_GSTS_AFLS (((u32)1) << 28)
0260 #define DMA_GSTS_WBFS (((u32)1) << 27)
0261 #define DMA_GSTS_QIES (((u32)1) << 26)
0262 #define DMA_GSTS_IRTPS (((u32)1) << 24)
0263 #define DMA_GSTS_IRES (((u32)1) << 25)
0264 #define DMA_GSTS_CFIS (((u32)1) << 23)
0265 
0266 /* DMA_RTADDR_REG */
0267 #define DMA_RTADDR_SMT (((u64)1) << 10)
0268 
0269 /* CCMD_REG */
0270 #define DMA_CCMD_ICC (((u64)1) << 63)
0271 #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
0272 #define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
0273 #define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
0274 #define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
0275 #define DMA_CCMD_MASK_NOBIT 0
0276 #define DMA_CCMD_MASK_1BIT 1
0277 #define DMA_CCMD_MASK_2BIT 2
0278 #define DMA_CCMD_MASK_3BIT 3
0279 #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
0280 #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
0281 
0282 /* FECTL_REG */
0283 #define DMA_FECTL_IM (((u32)1) << 31)
0284 
0285 /* FSTS_REG */
0286 #define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
0287 #define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
0288 #define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
0289 #define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
0290 #define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
0291 #define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
0292 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
0293 
0294 /* FRCD_REG, 32 bits access */
0295 #define DMA_FRCD_F (((u32)1) << 31)
0296 #define dma_frcd_type(d) ((d >> 30) & 1)
0297 #define dma_frcd_fault_reason(c) (c & 0xff)
0298 #define dma_frcd_source_id(c) (c & 0xffff)
0299 #define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
0300 #define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
0301 /* low 64 bit */
0302 #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
0303 
0304 /* PRS_REG */
0305 #define DMA_PRS_PPR ((u32)1)
0306 #define DMA_PRS_PRO ((u32)2)
0307 
0308 #define DMA_VCS_PAS ((u64)1)
0309 
/*
 * Busy-wait on an IOMMU register until @cond becomes true.  @op is the
 * register accessor (e.g. readl/readq) applied at @offset from the
 * register window, and @sts receives each value read.  If @cond is not
 * met within DMAR_OPERATION_TIMEOUT cycles the hardware is considered
 * broken and the kernel panics.
 */
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)         \
do {                                    \
    cycles_t start_time = get_cycles();             \
    while (1) {                         \
        sts = op(iommu->reg + offset);              \
        if (cond)                       \
            break;                      \
        if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
            panic("DMAR hardware is malfunctioning\n"); \
        cpu_relax();                        \
    }                               \
} while (0)
0322 
0323 #define QI_LENGTH   256 /* queue length */
0324 
/*
 * Status values for queued-invalidation descriptor slots (stored in
 * q_inval::desc_status).
 */
enum {
    QI_FREE,    /* slot unused */
    QI_IN_USE,  /* descriptor queued */
    QI_DONE,    /* descriptor completed */
    QI_ABORT
};
0331 
0332 #define QI_CC_TYPE      0x1
0333 #define QI_IOTLB_TYPE       0x2
0334 #define QI_DIOTLB_TYPE      0x3
0335 #define QI_IEC_TYPE     0x4
0336 #define QI_IWD_TYPE     0x5
0337 #define QI_EIOTLB_TYPE      0x6
0338 #define QI_PC_TYPE      0x7
0339 #define QI_DEIOTLB_TYPE     0x8
0340 #define QI_PGRP_RESP_TYPE   0x9
0341 #define QI_PSTRM_RESP_TYPE  0xa
0342 
0343 #define QI_IEC_SELECTIVE    (((u64)1) << 4)
0344 #define QI_IEC_IIDEX(idx)   (((u64)(idx & 0xffff) << 32))
0345 #define QI_IEC_IM(m)        (((u64)(m & 0x1f) << 27))
0346 
0347 #define QI_IWD_STATUS_DATA(d)   (((u64)d) << 32)
0348 #define QI_IWD_STATUS_WRITE (((u64)1) << 5)
0349 #define QI_IWD_FENCE        (((u64)1) << 6)
0350 #define QI_IWD_PRQ_DRAIN    (((u64)1) << 7)
0351 
0352 #define QI_IOTLB_DID(did)   (((u64)did) << 16)
0353 #define QI_IOTLB_DR(dr)     (((u64)dr) << 7)
0354 #define QI_IOTLB_DW(dw)     (((u64)dw) << 6)
0355 #define QI_IOTLB_GRAN(gran)     (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
0356 #define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
0357 #define QI_IOTLB_IH(ih)     (((u64)ih) << 6)
0358 #define QI_IOTLB_AM(am)     (((u8)am) & 0x3f)
0359 
0360 #define QI_CC_FM(fm)        (((u64)fm) << 48)
0361 #define QI_CC_SID(sid)      (((u64)sid) << 32)
0362 #define QI_CC_DID(did)      (((u64)did) << 16)
0363 #define QI_CC_GRAN(gran)    (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
0364 
0365 #define QI_DEV_IOTLB_SID(sid)   ((u64)((sid) & 0xffff) << 32)
0366 #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
0367 #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
0368 #define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
0369                    ((u64)((pfsid >> 4) & 0xfff) << 52))
0370 #define QI_DEV_IOTLB_SIZE   1
0371 #define QI_DEV_IOTLB_MAX_INVS   32
0372 
0373 #define QI_PC_PASID(pasid)  (((u64)pasid) << 32)
0374 #define QI_PC_DID(did)      (((u64)did) << 16)
0375 #define QI_PC_GRAN(gran)    (((u64)gran) << 4)
0376 
0377 /* PASID cache invalidation granu */
0378 #define QI_PC_ALL_PASIDS    0
0379 #define QI_PC_PASID_SEL     1
0380 #define QI_PC_GLOBAL        3
0381 
0382 #define QI_EIOTLB_ADDR(addr)    ((u64)(addr) & VTD_PAGE_MASK)
0383 #define QI_EIOTLB_IH(ih)    (((u64)ih) << 6)
0384 #define QI_EIOTLB_AM(am)    (((u64)am) & 0x3f)
0385 #define QI_EIOTLB_PASID(pasid)  (((u64)pasid) << 32)
0386 #define QI_EIOTLB_DID(did)  (((u64)did) << 16)
0387 #define QI_EIOTLB_GRAN(gran)    (((u64)gran) << 4)
0388 
0389 /* QI Dev-IOTLB inv granu */
0390 #define QI_DEV_IOTLB_GRAN_ALL       1
0391 #define QI_DEV_IOTLB_GRAN_PASID_SEL 0
0392 
0393 #define QI_DEV_EIOTLB_ADDR(a)   ((u64)(a) & VTD_PAGE_MASK)
0394 #define QI_DEV_EIOTLB_SIZE  (((u64)1) << 11)
0395 #define QI_DEV_EIOTLB_PASID(p)  ((u64)((p) & 0xfffff) << 32)
0396 #define QI_DEV_EIOTLB_SID(sid)  ((u64)((sid) & 0xffff) << 16)
0397 #define QI_DEV_EIOTLB_QDEP(qd)  ((u64)((qd) & 0x1f) << 4)
0398 #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
0399                     ((u64)((pfsid >> 4) & 0xfff) << 52))
0400 #define QI_DEV_EIOTLB_MAX_INVS  32
0401 
0402 /* Page group response descriptor QW0 */
0403 #define QI_PGRP_PASID_P(p)  (((u64)(p)) << 4)
0404 #define QI_PGRP_PDP(p)      (((u64)(p)) << 5)
0405 #define QI_PGRP_RESP_CODE(res)  (((u64)(res)) << 12)
0406 #define QI_PGRP_DID(rid)    (((u64)(rid)) << 16)
0407 #define QI_PGRP_PASID(pasid)    (((u64)(pasid)) << 32)
0408 
0409 /* Page group response descriptor QW1 */
0410 #define QI_PGRP_LPIG(x)     (((u64)(x)) << 2)
0411 #define QI_PGRP_IDX(idx)    (((u64)(idx)) << 3)
0412 
0413 
0414 #define QI_RESP_SUCCESS     0x0
0415 #define QI_RESP_INVALID     0x1
0416 #define QI_RESP_FAILURE     0xf
0417 
0418 #define QI_GRAN_NONG_PASID      2
0419 #define QI_GRAN_PSI_PASID       3
0420 
0421 #define qi_shift(iommu)     (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
0422 
/*
 * One invalidation-queue descriptor.  Legacy mode uses 128-bit
 * descriptors (qw0/qw1 only); scalable mode uses all four qwords —
 * see qi_shift(), which widens the queue stride when ecap_smts() is set.
 */
struct qi_desc {
    u64 qw0;
    u64 qw1;
    u64 qw2;
    u64 qw3;
};
0429 
/* Per-IOMMU queued-invalidation state (see intel_iommu::qi). */
struct q_inval {
    raw_spinlock_t  q_lock;
    void        *desc;          /* invalidation queue */
    int             *desc_status;   /* desc status */
    int             free_head;      /* first free entry */
    int             free_tail;      /* last free entry */
    int             free_cnt;       /* count of free entries */
};
0438 
0439 struct dmar_pci_notify_info;
0440 
0441 #ifdef CONFIG_IRQ_REMAP
0442 /* 1MB - maximum possible interrupt remapping table size */
0443 #define INTR_REMAP_PAGE_ORDER   8
0444 #define INTR_REMAP_TABLE_REG_SIZE   0xf
0445 #define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf
0446 
0447 #define INTR_REMAP_TABLE_ENTRIES    65536
0448 
0449 struct irq_domain;
0450 
/* Interrupt remapping table: the IRTE array plus a tracking bitmap. */
struct ir_table {
    struct irte *base;
    unsigned long *bitmap;  /* presumably marks allocated IRTEs — verify in irq remapping code */
};
0455 
0456 void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
0457 #else
/* Stub: interrupt remapping compiled out (CONFIG_IRQ_REMAP unset). */
static inline void
intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
0460 #endif
0461 
/*
 * Callbacks used to flush the context cache and the IOTLB of an IOMMU.
 * Which implementation is installed is decided elsewhere (not visible
 * in this header).
 */
struct iommu_flush {
    void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
                  u8 fm, u64 type);
    void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
                unsigned int size_order, u64 type);
};
0468 
/*
 * Indices into intel_iommu::iommu_state for the fault-event registers
 * saved/restored across suspend and resume; MAX_SR_DMAR_REGS is the
 * array size.
 */
enum {
    SR_DMAR_FECTL_REG,
    SR_DMAR_FEDATA_REG,
    SR_DMAR_FEADDR_REG,
    SR_DMAR_FEUADDR_REG,
    MAX_SR_DMAR_REGS
};
0476 
0477 #define VTD_FLAG_TRANS_PRE_ENABLED  (1 << 0)
0478 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED  (1 << 1)
0479 #define VTD_FLAG_SVM_CAPABLE        (1 << 2)
0480 
0481 extern int intel_iommu_sm;
0482 
0483 #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
0484 #define pasid_supported(iommu)  (sm_supported(iommu) &&         \
0485                  ecap_pasid((iommu)->ecap))
0486 
0487 struct pasid_entry;
0488 struct pasid_state_entry;
0489 struct page_req_dsc;
0490 
0491 /*
0492  * 0: Present
0493  * 1-11: Reserved
0494  * 12-63: Context Ptr (12 - (haw-1))
0495  * 64-127: Reserved
0496  */
/* One 128-bit root-table entry; bit layout described in the comment above. */
struct root_entry {
    u64     lo;
    u64     hi;
};
0501 
0502 /*
0503  * low 64 bits:
0504  * 0: present
0505  * 1: fault processing disable
0506  * 2-3: translation type
0507  * 12-63: address space root
0508  * high 64 bits:
0509  * 0-2: address width
0510  * 3-6: aval
0511  * 8-23: domain id
0512  */
/* One 128-bit context-table entry; bit layout described in the comment above. */
struct context_entry {
    u64 lo;
    u64 hi;
};
0517 
0518 /*
0519  * When VT-d works in the scalable mode, it allows DMA translation to
0520  * happen through either first level or second level page table. This
0521  * bit marks that the DMA translation for the domain goes through the
0522  * first level page table, otherwise, it goes through the second level.
0523  */
0524 #define DOMAIN_FLAG_USE_FIRST_LEVEL     BIT(1)
0525 
/*
 * Per-IOMMU attachment data of a dmar_domain.  Stored in
 * dmar_domain::iommu_array, indexed by iommu->seq_id (see
 * domain_id_iommu()).
 */
struct iommu_domain_info {
    struct intel_iommu *iommu;
    unsigned int refcnt;        /* Refcount of devices per iommu */
    u16 did;            /* Domain ids per IOMMU. Use u16 since
                     * domain ids are 16 bit wide according
                     * to VT-d spec, section 9.3 */
};
0533 
/*
 * Software state of one VT-d DMA remapping domain.  Embeds the generic
 * struct iommu_domain (recovered via to_dmar_domain()) and tracks
 * per-IOMMU attachment in @iommu_array.
 */
struct dmar_domain {
    int nid;            /* node id */
    struct xarray iommu_array;  /* Attached IOMMU array */

    u8 has_iotlb_device: 1;
    u8 iommu_coherency: 1;      /* indicate coherency of iommu access */
    u8 force_snooping : 1;      /* Create IOPTEs with snoop control */
    u8 set_pte_snp:1;

    spinlock_t lock;        /* Protect device tracking lists */
    struct list_head devices;   /* all devices' list */

    struct dma_pte  *pgd;       /* virtual address */
    int     gaw;        /* max guest address width */

    /* adjusted guest address width, 0 is level 2 30-bit */
    int     agaw;

    int     flags;      /* flags to find out type of domain */
    int     iommu_superpage;/* Level of superpages supported:
                   0 == 4KiB (no superpages), 1 == 2MiB,
                   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
    u64     max_addr;   /* maximum mapped address */

    struct iommu_domain domain; /* generic domain data structure for
                   iommu core */
};
0561 
/*
 * Per-hardware-unit (DRHD) state: the mapped register window, cached
 * CAP/ECAP/VCCAP values, the queued-invalidation queue, and optional
 * SVM / interrupt-remapping state selected by config options.
 */
struct intel_iommu {
    void __iomem    *reg; /* Pointer to hardware regs, virtual addr */
    u64         reg_phys; /* physical address of hw register set */
    u64     reg_size; /* size of hw register set */
    u64     cap;
    u64     ecap;
    u64     vccap;
    u32     gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
    raw_spinlock_t  register_lock; /* protect register handling */
    int     seq_id; /* sequence id of the iommu */
    int     agaw; /* agaw of this iommu */
    int     msagaw; /* max sagaw of this iommu */
    unsigned int    irq, pr_irq;
    u16     segment;     /* PCI segment# */
    unsigned char   name[13];    /* Device Name */

#ifdef CONFIG_INTEL_IOMMU
    unsigned long   *domain_ids; /* bitmap of domains */
    unsigned long   *copied_tables; /* bitmap of copied tables */
    spinlock_t  lock; /* protect context, domain ids */
    struct root_entry *root_entry; /* virtual address */

    struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
    struct page_req_dsc *prq;
    unsigned char prq_name[16];    /* Name for PRQ interrupt */
    struct completion prq_complete;
    struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
    struct iopf_queue *iopf_queue;
    unsigned char iopfq_name[16];
    struct q_inval  *qi;            /* Queued invalidation info */
    u32 *iommu_state; /* Store iommu states between suspend and resume.*/

#ifdef CONFIG_IRQ_REMAP
    struct ir_table *ir_table;  /* Interrupt remapping info */
    struct irq_domain *ir_domain;
    struct irq_domain *ir_msi_domain;
#endif
    struct iommu_device iommu;  /* IOMMU core code handle */
    int     node;
    u32     flags;      /* Software defined flags */

    struct dmar_drhd_unit *drhd;
    void *perf_statistic;
};
0609 
0610 /* PCI domain-device relationship */
/* Per-device state linking a PCI function to its IOMMU and domain. */
struct device_domain_info {
    struct list_head link;  /* link to domain siblings */
    u32 segment;        /* PCI segment number */
    u8 bus;         /* PCI bus number */
    u8 devfn;       /* PCI devfn number */
    u16 pfsid;      /* SRIOV physical function source ID */
    u8 pasid_supported:3;
    u8 pasid_enabled:1;
    u8 pri_supported:1;
    u8 pri_enabled:1;
    u8 ats_supported:1;
    u8 ats_enabled:1;
    u8 ats_qdep;        /* ATS invalidation queue depth */
    struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
    struct intel_iommu *iommu; /* IOMMU used by this device */
    struct dmar_domain *domain; /* pointer to domain */
    struct pasid_table *pasid_table; /* pasid table */
};
0629 
0630 static inline void __iommu_flush_cache(
0631     struct intel_iommu *iommu, void *addr, int size)
0632 {
0633     if (!ecap_coherent(iommu->ecap))
0634         clflush_cache_range(addr, size);
0635 }
0636 
0637 /* Convert generic struct iommu_domain to private struct dmar_domain */
static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
    /* The generic domain is embedded at dmar_domain::domain. */
    return container_of(dom, struct dmar_domain, domain);
}
0642 
0643 /* Retrieve the domain ID which has allocated to the domain */
static inline u16
domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
    /*
     * NOTE(review): xa_load() returns NULL when the domain was never
     * attached to @iommu; this dereference would then fault.  Callers
     * must guarantee attachment — verify at call sites.
     */
    struct iommu_domain_info *info =
            xa_load(&domain->iommu_array, iommu->seq_id);

    return info->did;
}
0652 
0653 /*
0654  * 0: readable
0655  * 1: writable
0656  * 2-6: reserved
0657  * 7: super page
0658  * 8-10: available
0659  * 11: snoop behavior
0660  * 12-63: Host physical address
0661  */
/* A single page-table entry; bit layout described in the comment above. */
struct dma_pte {
    u64 val;
};
0665 
/* Zero a PTE with a plain (non-atomic) store. */
static inline void dma_clear_pte(struct dma_pte *pte)
{
    pte->val = 0;
}
0670 
/*
 * Host physical address held in a PTE: low flag bits and the XD
 * (execute-disable, bit 63) bit are masked off.  On 32-bit builds a
 * no-op cmpxchg is used purely to obtain an atomic 64-bit read.
 */
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
    return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#else
    /* Must have a full atomic 64-bit read */
    return  __cmpxchg64(&pte->val, 0ULL, 0ULL) &
            VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#endif
}
0681 
0682 static inline bool dma_pte_present(struct dma_pte *pte)
0683 {
0684     return (pte->val & 3) != 0;
0685 }
0686 
0687 static inline bool dma_pte_superpage(struct dma_pte *pte)
0688 {
0689     return (pte->val & DMA_PTE_LARGE_PAGE);
0690 }
0691 
0692 static inline bool first_pte_in_page(struct dma_pte *pte)
0693 {
0694     return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
0695 }
0696 
/*
 * Number of PTE slots from @pte up to the next 4KiB table boundary.
 * When @pte is already at the start of a table page, a full page worth
 * of entries (2^VTD_STRIDE_SHIFT == 512) is returned rather than 0.
 */
static inline int nr_pte_to_next_page(struct dma_pte *pte)
{
    return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
        (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}
0702 
0703 static inline bool context_present(struct context_entry *context)
0704 {
0705     return (context->lo & 1);
0706 }
0707 
0708 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
0709 
0710 extern int dmar_enable_qi(struct intel_iommu *iommu);
0711 extern void dmar_disable_qi(struct intel_iommu *iommu);
0712 extern int dmar_reenable_qi(struct intel_iommu *iommu);
0713 extern void qi_global_iec(struct intel_iommu *iommu);
0714 
0715 extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
0716                  u8 fm, u64 type);
0717 extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
0718               unsigned int size_order, u64 type);
0719 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
0720             u16 qdep, u64 addr, unsigned mask);
0721 
0722 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
0723              unsigned long npages, bool ih);
0724 
0725 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
0726                   u32 pasid, u16 qdep, u64 addr,
0727                   unsigned int size_order);
0728 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
0729               u32 pasid);
0730 
0731 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
0732            unsigned int count, unsigned long options);
0733 /*
0734  * Options used in qi_submit_sync:
0735  * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
0736  */
0737 #define QI_OPT_WAIT_DRAIN       BIT(0)
0738 
0739 extern int dmar_ir_support(void);
0740 
0741 void *alloc_pgtable_page(int node);
0742 void free_pgtable_page(void *vaddr);
0743 void iommu_flush_write_buffer(struct intel_iommu *iommu);
0744 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
0745 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
0746 
0747 #ifdef CONFIG_INTEL_IOMMU_SVM
0748 extern void intel_svm_check(struct intel_iommu *iommu);
0749 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
0750 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
0751 struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
0752                  void *drvdata);
0753 void intel_svm_unbind(struct iommu_sva *handle);
0754 u32 intel_svm_get_pasid(struct iommu_sva *handle);
0755 int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
0756                 struct iommu_page_response *msg);
0757 
/* Per-device handle of an SVM bind (created via intel_svm_bind()). */
struct intel_svm_dev {
    struct list_head list;
    struct rcu_head rcu;
    struct device *dev;
    struct intel_iommu *iommu;
    struct iommu_sva sva;
    unsigned long prq_seq_number;
    u32 pasid;
    int users;          /* bind reference count */
    u16 did;
    u16 dev_iotlb:1;
    u16 sid, qdep;
};
0771 
/* One shared-virtual-memory context: an mm bound to a PASID. */
struct intel_svm {
    struct mmu_notifier notifier;
    struct mm_struct *mm;

    unsigned int flags;
    u32 pasid;
    struct list_head devs;  /* devices sharing this context — see intel_svm_dev */
};
0780 #else
0781 static inline void intel_svm_check(struct intel_iommu *iommu) {}
0782 #endif
0783 
0784 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
0785 void intel_iommu_debugfs_init(void);
0786 #else
0787 static inline void intel_iommu_debugfs_init(void) {}
0788 #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
0789 
0790 extern const struct attribute_group *intel_iommu_groups[];
0791 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
0792                      u8 devfn, int alloc);
0793 
0794 extern const struct iommu_ops intel_iommu_ops;
0795 
0796 #ifdef CONFIG_INTEL_IOMMU
0797 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
0798 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
0799 extern int dmar_disabled;
0800 extern int intel_iommu_enabled;
0801 #else
/* Stub when CONFIG_INTEL_IOMMU is disabled: report an AGAW of 0. */
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
    return 0;
}
/* Stub when CONFIG_INTEL_IOMMU is disabled: report a max SAGAW of 0. */
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
    return 0;
}
0810 #define dmar_disabled   (1)
0811 #define intel_iommu_enabled (0)
0812 #endif
0813 
0814 static inline const char *decode_prq_descriptor(char *str, size_t size,
0815         u64 dw0, u64 dw1, u64 dw2, u64 dw3)
0816 {
0817     char *buf = str;
0818     int bytes;
0819 
0820     bytes = snprintf(buf, size,
0821              "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
0822              FIELD_GET(GENMASK_ULL(31, 16), dw0),
0823              FIELD_GET(GENMASK_ULL(63, 12), dw1),
0824              dw1 & BIT_ULL(0) ? 'r' : '-',
0825              dw1 & BIT_ULL(1) ? 'w' : '-',
0826              dw0 & BIT_ULL(52) ? 'x' : '-',
0827              dw0 & BIT_ULL(53) ? 'p' : '-',
0828              dw1 & BIT_ULL(2) ? 'l' : '-',
0829              FIELD_GET(GENMASK_ULL(51, 32), dw0),
0830              FIELD_GET(GENMASK_ULL(11, 3), dw1));
0831 
0832     /* Private Data */
0833     if (dw0 & BIT_ULL(9)) {
0834         size -= bytes;
0835         buf += bytes;
0836         snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
0837     }
0838 
0839     return str;
0840 }
0841 
0842 #endif