Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2020 Google LLC
0004  * Author: Will Deacon <will@kernel.org>
0005  */
0006 
0007 #ifndef __ARM64_KVM_PGTABLE_H__
0008 #define __ARM64_KVM_PGTABLE_H__
0009 
0010 #include <linux/bits.h>
0011 #include <linux/kvm_host.h>
0012 #include <linux/types.h>
0013 
0014 #define KVM_PGTABLE_MAX_LEVELS      4U
0015 
0016 static inline u64 kvm_get_parange(u64 mmfr0)
0017 {
0018     u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
0019                 ID_AA64MMFR0_PARANGE_SHIFT);
0020     if (parange > ID_AA64MMFR0_PARANGE_MAX)
0021         parange = ID_AA64MMFR0_PARANGE_MAX;
0022 
0023     return parange;
0024 }
0025 
0026 typedef u64 kvm_pte_t;
0027 
0028 #define KVM_PTE_VALID           BIT(0)
0029 
0030 #define KVM_PTE_ADDR_MASK       GENMASK(47, PAGE_SHIFT)
0031 #define KVM_PTE_ADDR_51_48      GENMASK(15, 12)
0032 
0033 static inline bool kvm_pte_valid(kvm_pte_t pte)
0034 {
0035     return pte & KVM_PTE_VALID;
0036 }
0037 
0038 static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
0039 {
0040     u64 pa = pte & KVM_PTE_ADDR_MASK;
0041 
0042     if (PAGE_SHIFT == 16)
0043         pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
0044 
0045     return pa;
0046 }
0047 
/*
 * kvm_granule_shift() - Address shift covered by an entry at @level,
 * i.e. log2 of the granule size at that level.
 */
static inline u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}
0053 
0054 static inline u64 kvm_granule_size(u32 level)
0055 {
0056     return BIT(kvm_granule_shift(level));
0057 }
0058 
0059 static inline bool kvm_level_supports_block_mapping(u32 level)
0060 {
0061     /*
0062      * Reject invalid block mappings and don't bother with 4TB mappings for
0063      * 52-bit PAs.
0064      */
0065     return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
0066 }
0067 
/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
0106 
/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};
0117 
/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),

	/* NOTE(review): placed at bits 55-58 — presumably mirroring the
	 * PTE software-defined bits; confirm against the walker code. */
	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};
0141 
0142 #define KVM_PGTABLE_PROT_RW (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
0143 #define KVM_PGTABLE_PROT_RWX    (KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)
0144 
0145 #define PKVM_HOST_MEM_PROT  KVM_PGTABLE_PROT_RWX
0146 #define PKVM_HOST_MMIO_PROT KVM_PGTABLE_PROT_RW
0147 
0148 #define PAGE_HYP        KVM_PGTABLE_PROT_RW
0149 #define PAGE_HYP_EXEC       (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
0150 #define PAGE_HYP_RO     (KVM_PGTABLE_PROT_R)
0151 #define PAGE_HYP_DEVICE     (PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)
0152 
0153 typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
0154                        enum kvm_pgtable_prot prot);
0155 
/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	u32					ia_bits;
	u32					start_level;
	kvm_pte_t				*pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
};
0178 
/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
};
0193 
0194 typedef int (*kvm_pgtable_visitor_fn_t)(u64 addr, u64 end, u32 level,
0195                     kvm_pte_t *ptep,
0196                     enum kvm_pgtable_walk_flags flag,
0197                     void * const arg);
0198 
/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
0211 
0212 /**
0213  * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
0214  * @pgt:    Uninitialised page-table structure to initialise.
0215  * @va_bits:    Maximum virtual address bits.
0216  * @mm_ops: Memory management callbacks.
0217  *
0218  * Return: 0 on success, negative error code on failure.
0219  */
0220 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
0221              struct kvm_pgtable_mm_ops *mm_ops);
0222 
0223 /**
0224  * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
0225  * @pgt:    Page-table structure initialised by kvm_pgtable_hyp_init().
0226  *
0227  * The page-table is assumed to be unreachable by any hardware walkers prior
0228  * to freeing and therefore no TLB invalidation is performed.
0229  */
0230 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
0231 
0232 /**
0233  * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
0234  * @pgt:    Page-table structure initialised by kvm_pgtable_hyp_init().
0235  * @addr:   Virtual address at which to place the mapping.
0236  * @size:   Size of the mapping.
0237  * @phys:   Physical address of the memory to map.
0238  * @prot:   Permissions and attributes for the mapping.
0239  *
0240  * The offset of @addr within a page is ignored, @size is rounded-up to
0241  * the next page boundary and @phys is rounded-down to the previous page
0242  * boundary.
0243  *
0244  * If device attributes are not explicitly requested in @prot, then the
0245  * mapping will be normal, cacheable. Attempts to install a new mapping
0246  * for a virtual address that is already mapped will be rejected with an
0247  * error and a WARN().
0248  *
0249  * Return: 0 on success, negative error code on failure.
0250  */
0251 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
0252             enum kvm_pgtable_prot prot);
0253 
0254 /**
0255  * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
0256  * @pgt:    Page-table structure initialised by kvm_pgtable_hyp_init().
0257  * @addr:   Virtual address from which to remove the mapping.
0258  * @size:   Size of the mapping.
0259  *
0260  * The offset of @addr within a page is ignored and @size is rounded-up to
0261  * the next page boundary.
0263  *
0264  * TLB invalidation is performed for each page-table entry cleared during the
0265  * unmapping operation and the reference count for the page-table page
0266  * containing the cleared entry is decremented, with unreferenced pages being
0267  * freed. The unmapping operation will stop early if it encounters either an
0268  * invalid page-table entry or a valid block mapping which maps beyond the range
0269  * being unmapped.
0270  *
0271  * Return: Number of bytes unmapped, which may be 0.
0272  */
0273 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
0274 
0275 /**
0276  * kvm_get_vtcr() - Helper to construct VTCR_EL2
0277  * @mmfr0:  Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
0278  * @mmfr1:  Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
0279  * @phys_shift: Value to set in VTCR_EL2.T0SZ.
0280  *
0281  * The VTCR value is common across all the physical CPUs on the system.
0282  * We use system wide sanitised values to fill in different fields,
0283  * except for Hardware Management of Access Flags. HA Flag is set
0284  * unconditionally on all CPUs, as it is safe to run with or without
0285  * the feature and the bit is RES0 on CPUs that don't support it.
0286  *
0287  * Return: VTCR_EL2 value
0288  */
0289 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
0290 
0291 /**
0292  * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
0293  * @pgt:    Uninitialised page-table structure to initialise.
0294  * @mmu:    S2 MMU context for this S2 translation
0295  * @mm_ops: Memory management callbacks.
0296  * @flags:  Stage-2 configuration flags.
0297  * @force_pte_cb: Function that returns true if page level mappings must
0298  *      be used instead of block mappings.
0299  *
0300  * Return: 0 on success, negative error code on failure.
0301  */
0302 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
0303                   struct kvm_pgtable_mm_ops *mm_ops,
0304                   enum kvm_pgtable_stage2_flags flags,
0305                   kvm_pgtable_force_pte_cb_t force_pte_cb);
0306 
0307 #define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
0308     __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
0309 
0310 /**
0311  * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
0312  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0313  *
0314  * The page-table is assumed to be unreachable by any hardware walkers prior
0315  * to freeing and therefore no TLB invalidation is performed.
0316  */
0317 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
0318 
0319 /**
0320  * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
0321  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0322  * @addr:   Intermediate physical address at which to place the mapping.
0323  * @size:   Size of the mapping.
0324  * @phys:   Physical address of the memory to map.
0325  * @prot:   Permissions and attributes for the mapping.
0326  * @mc:     Cache of pre-allocated and zeroed memory from which to allocate
0327  *      page-table pages.
0328  *
0329  * The offset of @addr within a page is ignored, @size is rounded-up to
0330  * the next page boundary and @phys is rounded-down to the previous page
0331  * boundary.
0332  *
0333  * If device attributes are not explicitly requested in @prot, then the
0334  * mapping will be normal, cacheable.
0335  *
0336  * Note that the update of a valid leaf PTE in this function will be aborted,
0337  * if it's trying to recreate the exact same mapping or only change the access
0338  * permissions. Instead, the vCPU will exit one more time from guest if still
0339  * needed and then go through the path of relaxing permissions.
0340  *
0341  * Note that this function will both coalesce existing table entries and split
0342  * existing block mappings, relying on page-faults to fault back areas outside
0343  * of the new mapping lazily.
0344  *
0345  * Return: 0 on success, negative error code on failure.
0346  */
0347 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
0348                u64 phys, enum kvm_pgtable_prot prot,
0349                void *mc);
0350 
0351 /**
0352  * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
0353  *                  track ownership.
0354  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0355  * @addr:   Base intermediate physical address to annotate.
0356  * @size:   Size of the annotated range.
0357  * @mc:     Cache of pre-allocated and zeroed memory from which to allocate
0358  *      page-table pages.
0359  * @owner_id:   Unique identifier for the owner of the page.
0360  *
0361  * By default, all page-tables are owned by identifier 0. This function can be
0362  * used to mark portions of the IPA space as owned by other entities. When a
0363  * stage 2 is used with identity-mappings, these annotations allow to use the
0364  * page-table data structure as a simple rmap.
0365  *
0366  * Return: 0 on success, negative error code on failure.
0367  */
0368 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
0369                  void *mc, u8 owner_id);
0370 
0371 /**
0372  * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
0373  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0374  * @addr:   Intermediate physical address from which to remove the mapping.
0375  * @size:   Size of the mapping.
0376  *
0377  * The offset of @addr within a page is ignored and @size is rounded-up to
0378  * the next page boundary.
0379  *
0380  * TLB invalidation is performed for each page-table entry cleared during the
0381  * unmapping operation and the reference count for the page-table page
0382  * containing the cleared entry is decremented, with unreferenced pages being
0383  * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
0384  * FWB is not supported by the CPU.
0385  *
0386  * Return: 0 on success, negative error code on failure.
0387  */
0388 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
0389 
0390 /**
0391  * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
0392  *                                  without TLB invalidation.
0393  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0394  * @addr:   Intermediate physical address from which to write-protect.
0395  * @size:   Size of the range.
0396  *
0397  * The offset of @addr within a page is ignored and @size is rounded-up to
0398  * the next page boundary.
0399  *
0400  * Note that it is the caller's responsibility to invalidate the TLB after
0401  * calling this function to ensure that the updated permissions are visible
0402  * to the CPUs.
0403  *
0404  * Return: 0 on success, negative error code on failure.
0405  */
0406 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
0407 
0408 /**
0409  * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
0410  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0411  * @addr:   Intermediate physical address to identify the page-table entry.
0412  *
0413  * The offset of @addr within a page is ignored.
0414  *
0415  * If there is a valid, leaf page-table entry used to translate @addr, then
0416  * set the access flag in that entry.
0417  *
0418  * Return: The old page-table entry prior to setting the flag, 0 on failure.
0419  */
0420 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
0421 
0422 /**
0423  * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
0424  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0425  * @addr:   Intermediate physical address to identify the page-table entry.
0426  *
0427  * The offset of @addr within a page is ignored.
0428  *
0429  * If there is a valid, leaf page-table entry used to translate @addr, then
0430  * clear the access flag in that entry.
0431  *
0432  * Note that it is the caller's responsibility to invalidate the TLB after
0433  * calling this function to ensure that the updated permissions are visible
0434  * to the CPUs.
0435  *
0436  * Return: The old page-table entry prior to clearing the flag, 0 on failure.
0437  */
0438 kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
0439 
0440 /**
0441  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
0442  *                    page-table entry.
0443  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0444  * @addr:   Intermediate physical address to identify the page-table entry.
0445  * @prot:   Additional permissions to grant for the mapping.
0446  *
0447  * The offset of @addr within a page is ignored.
0448  *
0449  * If there is a valid, leaf page-table entry used to translate @addr, then
0450  * relax the permissions in that entry according to the read, write and
0451  * execute permissions specified by @prot. No permissions are removed, and
0452  * TLB invalidation is performed after updating the entry. Software bits cannot
0453  * be set or cleared using kvm_pgtable_stage2_relax_perms().
0454  *
0455  * Return: 0 on success, negative error code on failure.
0456  */
0457 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
0458                    enum kvm_pgtable_prot prot);
0459 
0460 /**
0461  * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
0462  *                 access flag set.
0463  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0464  * @addr:   Intermediate physical address to identify the page-table entry.
0465  *
0466  * The offset of @addr within a page is ignored.
0467  *
0468  * Return: True if the page-table entry has the access flag set, false otherwise.
0469  */
0470 bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
0471 
0472 /**
0473  * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point
0474  *                    of Coherency for guest stage-2 address
0475  *                    range.
0476  * @pgt:    Page-table structure initialised by kvm_pgtable_stage2_init*().
0477  * @addr:   Intermediate physical address from which to flush.
0478  * @size:   Size of the range.
0479  *
0480  * The offset of @addr within a page is ignored and @size is rounded-up to
0481  * the next page boundary.
0482  *
0483  * Return: 0 on success, negative error code on failure.
0484  */
0485 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
0486 
0487 /**
0488  * kvm_pgtable_walk() - Walk a page-table.
0489  * @pgt:    Page-table structure initialised by kvm_pgtable_*_init().
0490  * @addr:   Input address for the start of the walk.
0491  * @size:   Size of the range to walk.
0492  * @walker: Walker callback description.
0493  *
0494  * The offset of @addr within a page is ignored and @size is rounded-up to
0495  * the next page boundary.
0496  *
0497  * The walker will walk the page-table entries corresponding to the input
0498  * address range specified, visiting entries according to the walker flags.
0499  * Invalid entries are treated as leaf entries. Leaf entries are reloaded
0500  * after invoking the walker callback, allowing the walker to descend into
0501  * a newly installed table.
0502  *
0503  * Returning a negative error code from the walker callback function will
0504  * terminate the walk immediately with the same error code.
0505  *
0506  * Return: 0 on success, negative error code on failure.
0507  */
0508 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
0509              struct kvm_pgtable_walker *walker);
0510 
0511 /**
0512  * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
0513  *              with its level.
0514  * @pgt:    Page-table structure initialised by kvm_pgtable_*_init()
0515  *      or a similar initialiser.
0516  * @addr:   Input address for the start of the walk.
0517  * @ptep:   Pointer to storage for the retrieved PTE.
0518  * @level:  Pointer to storage for the level of the retrieved PTE.
0519  *
0520  * The offset of @addr within a page is ignored.
0521  *
0522  * The walker will walk the page-table entries corresponding to the input
0523  * address specified, retrieving the leaf corresponding to this address.
0524  * Invalid entries are treated as leaf entries.
0525  *
0526  * Return: 0 on success, negative error code on failure.
0527  */
0528 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
0529              kvm_pte_t *ptep, u32 *level);
0530 
0531 /**
0532  * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
0533  *                 stage-2 Page-Table Entry.
0534  * @pte:    Page-table entry
0535  *
0536  * Return: protection attributes of the page-table entry in the enum
0537  *     kvm_pgtable_prot format.
0538  */
0539 enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
0540 
0541 /**
0542  * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
0543  *              Page-Table Entry.
0544  * @pte:    Page-table entry
0545  *
0546  * Return: protection attributes of the page-table entry in the enum
0547  *     kvm_pgtable_prot format.
0548  */
0549 enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
0550 #endif  /* __ARM64_KVM_PGTABLE_H__ */