0001
0002 #ifndef _LINUX_HIGHMEM_INTERNAL_H
0003 #define _LINUX_HIGHMEM_INTERNAL_H
0004
0005
0006
0007
0008 #ifdef CONFIG_KMAP_LOCAL
0009 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
0010 void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
0011 void kunmap_local_indexed(const void *vaddr);
0012 void kmap_local_fork(struct task_struct *tsk);
0013 void __kmap_local_sched_out(void);
0014 void __kmap_local_sched_in(void);
/*
 * Warn (via lockdep's DEBUG_LOCKS_WARN_ON) if the current task still has
 * outstanding local kmaps: kmap_ctrl.idx is non-zero while any
 * kmap_local slot is in use.
 */
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
0019 #else
/* Without CONFIG_KMAP_LOCAL there is no per-task kmap state to copy or check. */
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
0022 #endif
0023
0024 #ifdef CONFIG_HIGHMEM
0025 #include <asm/highmem.h>
0026
0027 #ifndef ARCH_HAS_KMAP_FLUSH_TLB
0028 static inline void kmap_flush_tlb(unsigned long addr) { }
0029 #endif
0030
0031 #ifndef kmap_prot
0032 #define kmap_prot PAGE_KERNEL
0033 #endif
0034
0035 void *kmap_high(struct page *page);
0036 void kunmap_high(struct page *page);
0037 void __kmap_flush_unused(void);
0038 struct page *__kmap_to_page(void *addr);
0039
/**
 * kmap - establish a long-lived kernel mapping of a page
 * @page: page to map
 *
 * May sleep (might_sleep()), so this must not be called from atomic
 * context.  Lowmem pages already sit in the kernel linear map and just
 * need page_address(); only highmem pages go through kmap_high().  The
 * TLB flush is a no-op unless the architecture defines
 * ARCH_HAS_KMAP_FLUSH_TLB.
 */
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	addr = PageHighMem(page) ? kmap_high(page) : page_address(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}
0052
/**
 * kunmap - release a mapping established by kmap()
 * @page: page to unmap
 *
 * Lowmem pages were never specially mapped, so only highmem pages need
 * kunmap_high().  May sleep, mirroring kmap().
 */
static inline void kunmap(struct page *page)
{
	might_sleep();
	if (PageHighMem(page))
		kunmap_high(page);
}
0060
/* Resolve a kernel virtual address (possibly a kmap address) back to its page. */
static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}
0065
/* Tear down kmap mappings that are no longer in use. */
static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}
0070
/* Temporarily map @page with the default kernel protection (kmap_prot). */
static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}
0075
0076 static inline void *kmap_local_folio(struct folio *folio, size_t offset)
0077 {
0078 struct page *page = folio_page(folio, offset / PAGE_SIZE);
0079 return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
0080 }
0081
/* As kmap_local_page(), but with a caller-supplied page protection. */
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}
0086
/* As kmap_local_page(), but takes a page frame number instead of a page. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
0091
/* Common unmap path behind the kunmap_local() macro. */
static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}
0096
/*
 * Map @page with @prot and enter "atomic" mapping context: on PREEMPT_RT
 * only migration is disabled, otherwise preemption is disabled; page
 * faults are disabled in both cases.  Must be paired with
 * __kunmap_atomic(), which undoes these in reverse order.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}
0107
/* kmap_atomic_prot() with the default kernel protection. */
static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
0112
/*
 * PFN-based variant of kmap_atomic(): same migrate/preempt and pagefault
 * disabling, but maps via the pfn path.  Paired with __kunmap_atomic().
 */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
0123
/*
 * Undo kmap_atomic*(): drop the mapping first, then re-enable pagefaults
 * and migration/preemption in the exact reverse order of
 * kmap_atomic_prot().
 */
static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
0133
0134 unsigned int __nr_free_highpages(void);
0135 extern atomic_long_t _totalhigh_pages;
0136
/* Number of currently free highmem pages. */
static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}
0141
/* Total number of highmem pages, read from the shared atomic counter. */
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}
0146
/* Adjust the highmem page count by @count (may be negative). */
static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}
0151
0152 static inline bool is_kmap_addr(const void *x)
0153 {
0154 unsigned long addr = (unsigned long)x;
0155 return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
0156 }
0157 #else
0158
/* Without highmem every kernel address is in the linear map. */
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}
0163
/*
 * Without highmem, kmap() is trivial: the page is always directly
 * addressable.  might_sleep() is kept so callers see the same context
 * restrictions as the CONFIG_HIGHMEM version.
 */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
0169
/* !CONFIG_HIGHMEM: pages are permanently mapped, nothing to undo or flush. */
static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }
0172
/*
 * No mapping to tear down; only give architectures that define
 * ARCH_HAS_FLUSH_ON_KUNMAP a chance to flush the page's mapping.
 */
static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}
0179
/* Without highmem the page is always mapped; just return its address. */
static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}
0184
/* Folio memory is directly addressable; offset from the first page. */
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return page_address(&folio->page) + offset;
}
0189
/* @prot is ignored without highmem: the linear mapping is used as-is. */
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}
0194
/* PFN variant: convert to the page and use the direct mapping. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}
0199
/* Nothing was mapped; only the optional per-arch flush hook runs. */
static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}
0206
/*
 * No mapping needed, but the context changes (migrate/preempt and
 * pagefault disable) are kept identical to the CONFIG_HIGHMEM version so
 * callers behave the same on all configurations.
 */
static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}
0216
/* @prot is ignored without highmem; defer to kmap_atomic(). */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}
0221
/* PFN variant: convert to the page and defer to kmap_atomic(). */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
0226
/*
 * Undo kmap_atomic*(): run the optional per-arch flush hook, then
 * re-enable pagefaults and migration/preemption in the exact reverse
 * order of kmap_atomic().
 */
static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
0238
/* Without highmem there are no highmem pages to count. */
static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }
0241
/* Without highmem no address can be a kmap address. */
static inline bool is_kmap_addr(const void *x)
{
	return false;
}
0246
0247 #endif
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() takes the address returned by kmap_atomic(), not the
 * page.  The BUILD_BUG_ON rejects a struct page * argument at compile
 * time.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
/*
 * Unmap an address obtained from kmap_local_page() or kmap_local_folio().
 * As with kunmap_atomic(), the argument is the mapped address, not the
 * page; the BUILD_BUG_ON rejects a struct page * at compile time.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
0286
0287 #endif