/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_S390_KFENCE_H
#define _ASM_S390_KFENCE_H

#include <linux/mm.h>
#include <linux/kfence.h>
#include <asm/set_memory.h>
#include <asm/page.h>

void __kernel_map_pages(struct page *page, int numpages, int enable);

0013 static __always_inline bool arch_kfence_init_pool(void)
0014 {
0015     return true;
0016 }
/* KFENCE works on page granularity: strip the sub-page bits of an address. */
#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
0020 /*
0021  * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
0022  * but earlier where page table allocations still happen with memblock.
0023  * Reason is that arch_kfence_init_pool() gets called when the system
0024  * is still in a limbo state - disabling and enabling bottom halves is
0025  * not yet allowed, but that is what our page_table_alloc() would do.
0026  */
0027 static __always_inline void kfence_split_mapping(void)
0028 {
0029 #ifdef CONFIG_KFENCE
0030     unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
0031 
0032     set_memory_4k((unsigned long)__kfence_pool, pool_pages);
0033 #endif
0034 }
/*
 * (Un)protect a single KFENCE page by toggling its kernel mapping:
 * protect == true unmaps the page, protect == false maps it again.
 * Always succeeds on s390.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	int enable = protect ? 0 : 1;

	__kernel_map_pages(virt_to_page(addr), 1, enable);
	return true;
}
#endif /* _ASM_S390_KFENCE_H */