/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_H
#define __ASM_MTE_H

#include <asm/compiler.h>
#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/kasan-enabled.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <asm/pgtable-types.h>

/* Low-level tag manipulation primitives. */
void mte_clear_page_tags(void *addr);
unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
				      unsigned long n);
unsigned long mte_copy_tags_to_user(void __user *to, void *from,
				    unsigned long n);

/* Save, restore and invalidate page tags across swap-out and swap-in. */
int mte_save_tags(struct page *page);
void mte_save_page_tags(const void *page_addr, void *tag_storage);
bool mte_restore_tags(swp_entry_t entry, struct page *page);
void mte_restore_page_tags(void *page_addr, const void *tag_storage);
void mte_invalidate_tags(int type, pgoff_t offset);
void mte_invalidate_tags_area(int type);
void *mte_allocate_tag_storage(void);
void mte_free_tag_storage(char *storage);

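/*
 * Illustrative sketch only (assumed caller, not part of this header):
 * the swap code is expected to pair the helpers above around swap-out
 * and swap-in of a tagged page, roughly:
 *
 *	mte_save_tags(page);			// swap-out: stash the tags
 *	...
 *	if (mte_restore_tags(entry, page))	// swap-in: put them back
 *		set_bit(PG_mte_tagged, &page->flags);
 */
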
#ifdef CONFIG_ARM64_MTE

/* track which pages have valid allocation tags */
#define PG_mte_tagged	PG_arch_2

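/*
 * Illustrative sketch (assumed): a page is marked tagged once its tags
 * have been initialised, e.g.:
 *
 *	if (!test_and_set_bit(PG_mte_tagged, &page->flags))
 *		mte_clear_page_tags(page_address(page));
 */
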
void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t old_pte, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
void mte_suspend_enter(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data);
size_t mte_probe_user_range(const char __user *uaddr, size_t size);

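/*
 * Illustrative sketch (assumed): userspace reaches set_mte_ctrl() and
 * get_mte_ctrl() via the tagged address ABI prctls, e.g. enabling
 * synchronous tag checking with a permitted-tag mask:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *	      (0xfffe << PR_MTE_TAG_SHIFT), 0, 0, 0);
 */
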
#else /* CONFIG_ARM64_MTE */

/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged	0

static inline void mte_zero_clear_page_tags(void *addr)
{
}
static inline void mte_sync_tags(pte_t old_pte, pte_t pte)
{
}
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
static inline void mte_thread_init_user(void)
{
}
static inline void mte_thread_switch(struct task_struct *next)
{
}
static inline void mte_suspend_enter(void)
{
}
static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	return 0;
}
static inline long get_mte_ctrl(struct task_struct *task)
{
	return 0;
}
static inline int mte_ptrace_copy_tags(struct task_struct *child,
				       long request, unsigned long addr,
				       unsigned long data)
{
	return -EIO;
}

#endif /* CONFIG_ARM64_MTE */

static inline void mte_disable_tco_entry(struct task_struct *task)
{
	if (!system_supports_mte())
		return;

	/*
	 * Re-enable tag checking (TCO set on exception entry). This is only
	 * necessary if MTE is enabled in either the kernel or the userspace
	 * task in synchronous or asymmetric mode (SCTLR_EL1.TCF0: NONE=0b00,
	 * SYNC=0b01, ASYNC=0b10, ASYMM=0b11, so bit 0 is set for both SYNC
	 * and ASYMM). With MTE disabled in the kernel and disabled or
	 * asynchronous in userspace, tag check faults (including in uaccesses)
	 * are not reported, therefore there is no need to re-enable checking.
	 * This is beneficial on microarchitectures where re-enabling TCO is
	 * expensive.
	 */
	if (kasan_hw_tags_enabled() ||
	    (task->thread.sctlr_user & (1UL << SCTLR_EL1_TCF0_SHIFT)))
		asm volatile(SET_PSTATE_TCO(0));
}
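
/*
 * Illustrative call-site sketch (assumed, based on the exception entry
 * path): entry code runs with PSTATE.TCO set and clears it via
 * mte_disable_tco_entry() once it is ready to take tag check faults:
 *
 *	mte_disable_tco_entry(current);
 */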

#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous or asymmetric mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);

static inline bool system_uses_mte_async_or_asymm_mode(void)
{
	return static_branch_unlikely(&mte_async_or_asymm_mode);
}

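/*
 * Illustrative sketch (assumed): the static key lets hot paths skip
 * TFSR handling when neither asynchronous nor asymmetric mode is in
 * use, e.g.:
 *
 *	if (system_uses_mte_async_or_asymm_mode())
 *		mte_check_tfsr_el1();
 */
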
void mte_check_tfsr_el1(void);

static inline void mte_check_tfsr_entry(void)
{
	if (!system_supports_mte())
		return;

	mte_check_tfsr_el1();
}

static inline void mte_check_tfsr_exit(void)
{
	if (!system_supports_mte())
		return;

	/*
	 * Asynchronous faults are synchronised with TFSR_EL1 automatically
	 * on kernel entry, but on exit an explicit dsb() is required.
	 */
	dsb(nsh);
	isb();

	mte_check_tfsr_el1();
}
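
/*
 * Illustrative call-site sketch (assumed): the entry/exit code brackets
 * kernel execution with these checks so that asynchronous tag check
 * faults raised in kernel mode are reported before returning to user:
 *
 *	mte_check_tfsr_entry();	// on exception entry
 *	...
 *	mte_check_tfsr_exit();	// on the return-to-user path
 */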
#else
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
	return false;
}
static inline void mte_check_tfsr_el1(void)
{
}
static inline void mte_check_tfsr_entry(void)
{
}
static inline void mte_check_tfsr_exit(void)
{
}
#endif /* CONFIG_KASAN_HW_TAGS */

#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H */