/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:   Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. If there is no mapping slot
 * available, the function blocks until a slot is released via kunmap().
 */
static inline void *kmap(struct page *page);
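
/*
 * Illustrative sketch (not part of the upstream header): a typical
 * kmap()/kunmap() pair for long term access to a page that may live in
 * high memory. "page" is assumed to be a valid struct page obtained
 * elsewhere, e.g. via alloc_page().
 *
 *   void *vaddr = kmap(page);      // may sleep on 32bit highmem systems
 *   memset(vaddr, 0, PAGE_SIZE);   // access the page through the mapping
 *   kunmap(page);                  // release the global mapping slot
 */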

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:   Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:   The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);
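
/*
 * Illustrative sketch (not part of the upstream header): kmap_to_page()
 * recovers the struct page behind an address returned by kmap(), which is
 * handy when only the mapped address is passed around. "page" is assumed
 * to be a valid struct page.
 *
 *   void *vaddr = kmap(page);
 *   WARN_ON(kmap_to_page(vaddr) != page);   // round trip back to the page
 *   kunmap(page);
 */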

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *             remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case, it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
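
/*
 * Illustrative sketch (not part of the upstream header): short lived,
 * CPU-local access to a possibly-highmem page. The pointer must not be
 * handed to other contexts and has to be unmapped before leaving the
 * current context. "page" is assumed to be a valid struct page.
 *
 *   void *vaddr = kmap_local_page(page);
 *   memset(vaddr, 0, PAGE_SIZE);   // access is only valid in this context
 *   kunmap_local(vaddr);           // note: takes the address, not the page
 */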

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case, it
 * comes with restrictions about the pointer validity. Only use it when
 * really necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
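
/*
 * Illustrative sketch (not part of the upstream header): mapping the page
 * of a (possibly large) folio which contains a given byte offset. Only that
 * page is mapped, so accesses must stay within it. "folio", "pos", "buf"
 * and "len" are assumed to come from the caller, e.g. a pagecache lookup.
 *
 *   size_t off = offset_in_folio(folio, pos);
 *   char *kaddr = kmap_local_folio(folio, off);
 *
 *   memcpy(buf, kaddr, len);   // len must not extend past this page
 *   kunmap_local(kaddr);
 */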

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:   Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache.  The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *   // Find the page of interest.
 *   struct page *page = find_get_page(mapping, offset);
 *
 *   // Gain access to the contents of that page.
 *   void *vaddr = kmap_atomic(page);
 *
 *   // Do something to the contents of that page.
 *   memset(vaddr, 0, PAGE_SIZE);
 *
 *   // Unmap that page.
 *   kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like:
 *
 * vaddr1 = kmap_atomic(page1);
 * vaddr2 = kmap_atomic(page2);
 *
 * memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 * kunmap_atomic(vaddr2);
 * kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);
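
/*
 * Illustrative sketch (not part of the upstream header): the same pattern
 * as the deprecated kmap_atomic() example above, written with the
 * preferred kmap_local_page() interface. If the caller really depends on
 * pagefaults being disabled, that has to be done explicitly, e.g. with
 * pagefault_disable()/pagefault_enable().
 *
 *   struct page *page = find_get_page(mapping, offset);
 *   void *vaddr = kmap_local_page(page);
 *
 *   memset(vaddr, 0, PAGE_SIZE);
 *   kunmap_local(vaddr);
 *   put_page(page);                 // drop the find_get_page() reference
 */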

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
    void *addr = kmap_local_page(page);
    clear_user_page(addr, vaddr, page);
    kunmap_local(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * Returns: The allocated and zeroed HIGHMEM page
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing its own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                   unsigned long vaddr)
{
    struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

    if (page)
        clear_user_highpage(page, vaddr);

    return page;
}
#endif
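
/*
 * Illustrative sketch (not part of the upstream header): a typical caller
 * is an anonymous page fault path that needs a zeroed, movable page for a
 * user mapping. "vma" and "vmf" are assumed to come from the fault handler.
 *
 *   struct page *page;
 *
 *   page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *   if (!page)
 *       return VM_FAULT_OOM;
 *   // ... insert the page into the VMA at vmf->address ...
 */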

static inline void clear_highpage(struct page *page)
{
    void *kaddr = kmap_local_page(page);
    clear_page(kaddr);
    kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
    u8 tag;

    tag = page_kasan_tag(page);
    page_kasan_tag_reset(page);
    clear_highpage(page);
    page_kasan_tag_set(page, tag);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
        unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
{
    void *kaddr = kmap_local_page(page);
    unsigned int i;

    BUG_ON(end1 > page_size(page) || end2 > page_size(page));

    if (end1 > start1)
        memset(kaddr + start1, 0, end1 - start1);

    if (end2 > start2)
        memset(kaddr + start2, 0, end2 - start2);

    kunmap_local(kaddr);
    for (i = 0; i < compound_nr(page); i++)
        flush_dcache_page(page + i);
}
#endif
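
/*
 * Illustrative sketch (not part of the upstream header): zeroing the bytes
 * around a valid region in a single call, as filesystems do when only part
 * of a page holds real data. "from" and "to" are assumed to be byte offsets
 * within the page with 0 <= from <= to <= PAGE_SIZE.
 *
 *   zero_user_segments(page, 0, from, to, PAGE_SIZE);
 */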

static inline void zero_user_segment(struct page *page,
    unsigned start, unsigned end)
{
    zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
    unsigned start, unsigned size)
{
    zero_user_segments(page, start, start + size, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
    unsigned long vaddr, struct vm_area_struct *vma)
{
    char *vfrom, *vto;

    vfrom = kmap_local_page(from);
    vto = kmap_local_page(to);
    copy_user_page(vto, vfrom, vaddr, to);
    kunmap_local(vto);
    kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
    char *vfrom, *vto;

    vfrom = kmap_local_page(from);
    vto = kmap_local_page(to);
    copy_page(vto, vfrom);
    kunmap_local(vto);
    kunmap_local(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
                   struct page *src_page, size_t src_off,
                   size_t len)
{
    char *dst = kmap_local_page(dst_page);
    char *src = kmap_local_page(src_page);

    VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
    memcpy(dst + dst_off, src + src_off, len);
    kunmap_local(src);
    kunmap_local(dst);
}
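
/*
 * Illustrative sketch (not part of the upstream header): copying a region
 * between two pages without open-coding the kmap_local_page()/kunmap_local()
 * pairs. "dst", "src", "off" and "len" are assumed to come from the caller;
 * neither range may cross a page boundary.
 *
 *   memcpy_page(dst, off, src, off, len);
 */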

static inline void memset_page(struct page *page, size_t offset, int val,
                   size_t len)
{
    char *addr = kmap_local_page(page);

    VM_BUG_ON(offset + len > PAGE_SIZE);
    memset(addr + offset, val, len);
    kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
                    size_t offset, size_t len)
{
    char *from = kmap_local_page(page);

    VM_BUG_ON(offset + len > PAGE_SIZE);
    memcpy(to, from + offset, len);
    kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
                  const char *from, size_t len)
{
    char *to = kmap_local_page(page);

    VM_BUG_ON(offset + len > PAGE_SIZE);
    memcpy(to + offset, from, len);
    flush_dcache_page(page);
    kunmap_local(to);
}
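
/*
 * Illustrative sketch (not part of the upstream header): a round trip
 * through a possibly-highmem page with the to/from helpers, which keep the
 * mapping window as short as possible and flush the dcache on the write
 * side. "page" is assumed to be valid and "buf" a kernel buffer of "len"
 * bytes, with offset + len <= PAGE_SIZE.
 *
 *   memcpy_to_page(page, offset, buf, len);     // kernel buffer -> page
 *   memcpy_from_page(buf, page, offset, len);   // page -> kernel buffer
 */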

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
    char *addr = kmap_local_page(page);

    VM_BUG_ON(offset + len > PAGE_SIZE);
    memset(addr + offset, 0, len);
    flush_dcache_page(page);
    kunmap_local(addr);
}

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
        size_t start1, size_t xend1, size_t start2, size_t xend2)
{
    zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
        size_t start, size_t xend)
{
    zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
        size_t start, size_t length)
{
    zero_user_segments(&folio->page, start, start + length, 0, 0);
}
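
/*
 * Illustrative sketch (not part of the upstream header): zeroing the part
 * of a folio beyond EOF, as a filesystem does on truncate or a partial
 * write. "folio" is assumed to be the folio covering "pos", with the
 * offsets staying inside the folio.
 *
 *   size_t off = offset_in_folio(folio, pos);
 *
 *   folio_zero_segment(folio, off, folio_size(folio));   // zero from off to the end
 *   // or, equivalently:
 *   folio_zero_range(folio, off, folio_size(folio) - off);
 */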

#endif /* _LINUX_HIGHMEM_H */