/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is enough
 * space it might just reuse an existing mapping which was established by
 * an earlier kmap() invocation.
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:	Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);
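
/*
 * Example (a minimal sketch, not part of the kernel tree): reading one
 * byte out of a possibly-highmem page through the long term kmap()/kunmap()
 * API. The enclosing function and its name are hypothetical.
 *
 *	static char read_first_byte(struct page *page)
 *	{
 *		char *vaddr = kmap(page);	// may sleep on 32bit HIGHMEM
 *		char c = vaddr[0];
 *
 *		kunmap(page);			// takes the page, not vaddr
 *		return c;
 *	}
 */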

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:	The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 *   addr1 = kmap_local_page(page1);
 *   addr2 = kmap_local_page(page2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
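
/*
 * Example (an illustrative sketch, not code from the tree): the preferred
 * temporary mapping pattern. The checksum helper and its name are made up.
 *
 *	static u32 sum_page(struct page *page)
 *	{
 *		u32 sum = 0;
 *		size_t i;
 *		u8 *vaddr = kmap_local_page(page);
 *
 *		for (i = 0; i < PAGE_SIZE; i++)
 *			sum += vaddr[i];
 *		kunmap_local(vaddr);	// takes the address, not the page
 *		return sum;
 *	}
 */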

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
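
/*
 * Example (illustrative only): mapping the page of a folio which contains
 * byte @offset. kmap_local_folio() maps a single page, so accesses must
 * not cross a page boundary within a large folio.
 *
 *	static u8 folio_read_byte(struct folio *folio, size_t offset)
 *	{
 *		u8 *vaddr = kmap_local_folio(folio, offset);
 *		u8 val = *vaddr;	// vaddr already points at @offset
 *
 *		kunmap_local(vaddr);
 *		return val;
 *	}
 */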

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also disables migration or
 * preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following:
 *
 *   // Find the page of interest.
 *   struct page *page = find_get_page(mapping, offset);
 *
 *   // Gain access to the contents of that page.
 *   void *vaddr = kmap_atomic(page);
 *
 *   // Do something to the contents of that page.
 *   memset(vaddr, 0, PAGE_SIZE);
 *
 *   // Unmap that page.
 *   kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 */
static inline void *kmap_atomic(struct page *page);
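
/*
 * Conversion sketch (illustrative, not code from the tree): moving a
 * legacy kmap_atomic() user over to kmap_local_page().
 *
 *	// Deprecated:
 *	vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 *	// Preferred; add pagefault_disable()/preempt_disable() explicitly
 *	// only if the code actually relied on those side effects:
 *	vaddr = kmap_local_page(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_local(vaddr);
 */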

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
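
/*
 * Usage sketch (illustrative; see Documentation/core-api/cachetlb.rst for
 * the authoritative rules): a driver which performs DMA on a buffer that
 * it also accesses through a vmap() alias brackets the DMA with these
 * calls. All names other than the two helpers are hypothetical.
 *
 *	flush_kernel_vmap_range(vmap_addr, size);	// after CPU writes,
 *							// before DMA reads memory
 *	... start DMA, wait for completion ...
 *	invalidate_kernel_vmap_range(vmap_addr, size);	// before CPU reads
 *							// data DMA'ed into memory
 */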

#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * Returns: The allocated and zeroed HIGHMEM page
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing its own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
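
/*
 * Example (a hedged sketch of how an anonymous fault handler might use
 * this helper; the surrounding fault-handling details are omitted and the
 * error handling is illustrative only):
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	// page is zeroed and allocated GFP_HIGHUSER_MOVABLE, ready to be
 *	// mapped into userspace.
 */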

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	u8 tag;

	tag = page_kasan_tag(page);
	page_kasan_tag_reset(page);
	clear_highpage(page);
	page_kasan_tag_set(page, tag);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
		unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
		unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
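
/*
 * Example (illustrative): a filesystem zeroing the tail of a page after a
 * short write so stale data is never exposed. 'from' and 'len' are
 * hypothetical variables.
 *
 *	// Zero from 'from' to the end of the page:
 *	zero_user_segment(page, from, PAGE_SIZE);
 *
 *	// Zero 'len' bytes starting at 'from':
 *	zero_user(page, from, len);
 */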

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif
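
/*
 * Example (a hedged sketch; 'new_page', 'old_page', 'vmf' and 'vma' are
 * hypothetical): picking between the two copy helpers.
 *
 *	// Copy-on-write style copy which also handles user-visible cache
 *	// aliasing via copy_user_page():
 *	copy_user_highpage(new_page, old_page, vmf->address, vma);
 *
 *	// Plain kernel copy with no user aliasing concerns:
 *	copy_highpage(new_page, old_page);
 */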

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}
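
/*
 * Example (illustrative): a round trip through the byte-range helpers.
 * 'page' and the offsets are hypothetical; every range must stay within
 * PAGE_SIZE or the VM_BUG_ON()s above will trigger.
 *
 *	char buf[8];
 *
 *	memcpy_to_page(page, 0, "hello", 5);	// write, flushes dcache
 *	memcpy_from_page(buf, page, 0, 5);	// read it back
 *	memzero_page(page, 0, PAGE_SIZE);	// zero the whole page
 */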

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}
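
/*
 * Example (illustrative): a filesystem zeroing the part of a folio beyond
 * EOF during truncation. 'offset' is a hypothetical byte offset.
 *
 *	// Zero from 'offset' to the end of the folio:
 *	folio_zero_segment(folio, offset, folio_size(folio));
 *
 *	// Equivalently, zero an explicit length:
 *	folio_zero_range(folio, offset, folio_size(folio) - offset);
 */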

#endif /* _LINUX_HIGHMEM_H */