/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER    0x8000  /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT    0
#define SWAP_FLAG_DISCARD   0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE  0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID    (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                 SWAP_FLAG_DISCARD_PAGES)
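
/*
 * Illustrative sketch (not part of this header): how the flag and
 * priority encoding above is meant to be used. The validation and
 * decode follow the spirit of sys_swapon() in mm/swapfile.c; treat the
 * exact code as an assumption, not a copy of that function.
 */
#if 0 /* example only */
static int decode_swapon_flags(int swap_flags)
{
    int prio = -1;  /* negative means "no explicit priority" */

    if (swap_flags & ~SWAP_FLAGS_VALID)
        return -EINVAL;
    if (swap_flags & SWAP_FLAG_PREFER)
        prio = (swap_flags & SWAP_FLAG_PRIO_MASK) >>
            SWAP_FLAG_PRIO_SHIFT;
    return prio;
}
#endif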
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
    return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT 5

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

#define SWP_SWAPIN_ERROR_NUM 1
#define SWP_SWAPIN_ERROR     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
                 SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
                 SWP_PTE_MARKER_NUM)
/*
 * PTE markers are used to persist information onto PTEs that are mapped with
 * file-backed memory.  As its name "PTE" hints, it should only be applied to
 * the leaves of pgtables.
 */
#ifdef CONFIG_PTE_MARKER
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
                SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
#else
#define SWP_PTE_MARKER_NUM 0
#endif

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. The short description is that we need struct
 * pages for device memory that is unaddressable (inaccessible) by the CPU,
 * so that we can migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * any part of an anonymous page that is mapped writable is exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON        MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
    ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
    SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
    SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
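
/*
 * Worked example (assuming every option above is enabled): the 5-bit
 * type field gives 2^5 = 32 values; 4 device + 3 migration + 1 hwpoison
 * + 1 pte-marker + 1 swapin-error entries are carved off the top, so
 * MAX_SWAPFILES = 32 - 4 - 3 - 1 - 1 - 1 = 22 usable swap areas.
 */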

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
    struct {
        char reserved[PAGE_SIZE - 10];
        char magic[10];         /* SWAP-SPACE or SWAPSPACE2 */
    } magic;
    struct {
        char        bootbits[1024]; /* Space for disklabel etc. */
        __u32       version;
        __u32       last_page;
        __u32       nr_badpages;
        unsigned char   sws_uuid[16];
        unsigned char   sws_volume[16];
        __u32       padding[117];
        __u32       badpages[1];
    } info;
};
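
/*
 * Illustrative sketch (assumption, not kernel code): probing a
 * PAGE_SIZE buffer for the swap signature and reading the v2 header
 * fields. "SWAPSPACE2" is the current magic; "SWAP-SPACE" marks the
 * obsolete v1 format. badpages[0..nr_badpages-1] lists unusable page
 * offsets.
 */
#if 0 /* example only */
static bool parse_swap_header(const union swap_header *hdr)
{
    if (memcmp(hdr->magic.magic, "SWAPSPACE2", 10))
        return false;       /* not a v2 swap area */
    /* for SWAPSPACE2, the version field is 1 */
    return hdr->info.version == 1;
}
#endif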

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
    unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
    struct rb_node rb_node;
    pgoff_t start_page;
    pgoff_t nr_pages;
    sector_t start_block;
};
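
/*
 * Illustrative sketch (assumption): walking the extent rbtree to map a
 * swapfile page offset to a disk block, in the spirit of
 * offset_to_swap_extent() in mm/swapfile.c. The "root" parameter is
 * hypothetical; the real tree lives in swap_info_struct.swap_extent_root
 * below.
 */
#if 0 /* example only */
static sector_t extent_lookup(struct rb_root *root, pgoff_t offset)
{
    struct rb_node *rb = root->rb_node;

    while (rb) {
        struct swap_extent *se =
            rb_entry(rb, struct swap_extent, rb_node);

        if (offset < se->start_page)
            rb = rb->rb_left;
        else if (offset >= se->start_page + se->nr_pages)
            rb = rb->rb_right;
        else
            return se->start_block + (offset - se->start_page);
    }
    return 0;   /* not mapped; the real code treats this as a bug */
}
#endif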

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
    ((offsetof(union swap_header, magic.magic) - \
      offsetof(union swap_header, info.badpages)) / sizeof(int))
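
/*
 * Worked example (4 KiB pages): magic.magic starts at PAGE_SIZE - 10 =
 * 4086, info.badpages starts at 1024 + 3*4 + 16 + 16 + 117*4 = 1536,
 * so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 entries.
 */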

enum {
    SWP_USED    = (1 << 0), /* is slot in swap_info[] used? */
    SWP_WRITEOK = (1 << 1), /* ok to write to this swap?    */
    SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
    SWP_DISCARDING  = (1 << 3), /* now discarding a free cluster */
    SWP_SOLIDSTATE  = (1 << 4), /* blkdev seeks are cheap */
    SWP_CONTINUED   = (1 << 5), /* swap_map has count continuation */
    SWP_BLKDEV  = (1 << 6), /* it's a block device */
    SWP_ACTIVATED   = (1 << 7), /* set after swap_activate success */
    SWP_FS_OPS  = (1 << 8), /* swapfile operations go through fs */
    SWP_AREA_DISCARD = (1 << 9),    /* single-time swap area discards */
    SWP_PAGE_DISCARD = (1 << 10),   /* freed swap page-cluster discards */
    SWP_STABLE_WRITES = (1 << 11),  /* no overwrite PG_writeback pages */
    SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
                    /* add others here before... */
    SWP_SCANNING    = (1 << 14),    /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80    /* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX    0x3e    /* Max count */
#define SWAP_MAP_BAD    0x3f    /* Note page is bad */
#define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX   0x7f    /* Max count */
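
/*
 * Worked example (assumption, following swap_count() in mm/swapfile.c):
 * decoding one swap_map byte. The low six bits carry the usage count,
 * SWAP_HAS_CACHE says the slot also sits in the swap cache, and
 * COUNT_CONTINUED means the true count continues in a continuation
 * page (see add_swap_count_continuation() below). "si" and "offset"
 * are hypothetical locals.
 */
#if 0 /* example only */
    unsigned char ent = si->swap_map[offset];
    bool has_cache = ent & SWAP_HAS_CACHE;
    unsigned char count = ent & ~SWAP_HAS_CACHE;

    if (count == SWAP_MAP_BAD)
        ;   /* bad slot, never allocated */
    else if (count == SWAP_MAP_SHMEM)
        ;   /* entry owned by shmem/tmpfs */
    else if (count & COUNT_CONTINUED)
        ;   /* high-order count bits live in a continuation page */
#endif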

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
    spinlock_t lock;    /*
                 * Protects swap_cluster_info fields
                 * and the swap_info_struct->swap_map
                 * elements corresponding to this
                 * swap cluster
                 */
    unsigned int data:24;
    unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
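
/*
 * Illustrative sketch (assumption): how the bitfields above encode the
 * free list. cluster_is_free() mirrors a helper of the same name in
 * mm/swapfile.c; cluster_next() shows the free-list linkage through the
 * 24-bit data field.
 */
#if 0 /* example only */
static inline bool cluster_is_free(struct swap_cluster_info *ci)
{
    return ci->flags & CLUSTER_FLAG_FREE;
}

static inline unsigned int cluster_next(struct swap_cluster_info *ci)
{
    return ci->data;    /* next free cluster index when free */
}
#endif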

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
    struct swap_cluster_info index; /* Current cluster index */
    unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
    struct swap_cluster_info head;
    struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
    struct percpu_ref users;    /* indicate and keep swap device valid. */
    unsigned long   flags;      /* SWP_USED etc: see above */
    signed short    prio;       /* swap priority of this type */
    struct plist_node list;     /* entry in swap_active_head */
    signed char type;       /* strange name for an index */
    unsigned int    max;        /* extent of the swap_map */
    unsigned char *swap_map;    /* vmalloc'ed array of usage counts */
    struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
    struct swap_cluster_list free_clusters; /* free clusters list */
    unsigned int lowest_bit;    /* index of first free in swap_map */
    unsigned int highest_bit;   /* index of last free in swap_map */
    unsigned int pages;     /* total of usable pages of swap */
    unsigned int inuse_pages;   /* number of those currently in use */
    unsigned int cluster_next;  /* likely index for next allocation */
    unsigned int cluster_nr;    /* countdown to next cluster search */
    unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
    struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
    struct rb_root swap_extent_root;/* root of the swap extent rbtree */
    struct block_device *bdev;  /* swap device or bdev of swap file */
    struct file *swap_file;     /* seldom referenced */
    unsigned int old_block_size;    /* seldom referenced */
    struct completion comp;     /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
    unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
    atomic_t frontswap_pages;   /* frontswap pages in-use counter */
#endif
    spinlock_t lock;        /*
                     * Protects map-scan related fields
                     * such as swap_map, lowest_bit,
                     * highest_bit, inuse_pages,
                     * cluster_next, cluster_nr,
                     * lowest_alloc, highest_alloc, and
                     * the free/discard cluster lists.
                     * Other fields change only at
                     * swapon/swapoff, so they are
                     * protected by swap_lock. Changing
                     * flags requires holding both this
                     * lock and swap_lock; if both must
                     * be held, take swap_lock first.
                     */
    spinlock_t cont_lock;       /*
                     * protect swap count continuation page
                     * list.
                     */
    struct work_struct discard_work; /* discard worker */
    struct swap_cluster_list discard_clusters; /* discard clusters list */
    struct plist_node avail_lists[]; /*
                       * entries in swap_avail_heads, one
                       * entry per node.
                       * Must be last: the array length is
                       * nr_node_ids, which is not a fixed
                       * value, so it has to be allocated
                       * dynamically.
                       * And it has to be an array so that
                       * plist_for_each_* can work.
                       */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING   5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING   3
#define SWAP_RA_PTE_CACHE_SIZE  (1 << SWAP_RA_ORDER_CEILING)
#endif
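
/*
 * Worked example: on 32-bit, the readahead window is capped at order 3,
 * so the on-stack PTE cache holds 1 << 3 = 8 entries; 64-bit kernels
 * allow order 5 (a 32-page window) and reference the PTEs through a
 * pointer instead of an on-stack array.
 */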

struct vma_swap_readahead {
    unsigned short win;
    unsigned short offset;
    unsigned short nr_pte;
#ifdef CONFIG_64BIT
    pte_t *ptes;
#else
    pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
    swp_entry_t entry = { .val = page_private(&folio->page) };
    return entry;
}

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {               \
    if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {     \
        xas_set_update(xas, workingset_update_node);        \
        xas_set_lru(xas, &shadow_nodes);            \
    }                               \
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
              unsigned int nr_pages);
extern void lru_note_cost_folio(struct folio *);
extern void folio_add_lru(struct folio *);
extern void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
    return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
    atomic_dec(&lru_disable_count);
}
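
/*
 * Usage sketch (assumption): lru_cache_disable() below raises
 * lru_disable_count and drains the per-cpu LRU batches; until the
 * matching lru_cache_enable(), new pages bypass the batches and go
 * straight onto the LRU lists, which callers such as page migration
 * rely on.
 */
#if 0 /* example only */
    lru_cache_disable();
    /* ... isolate/migrate pages without per-cpu batching interference ... */
    lru_cache_enable();
#endif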

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
                        struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                    gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                          unsigned long nr_pages,
                          gfp_t gfp_mask,
                          unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                        gfp_t gfp_mask, bool noswap,
                        pg_data_t *pgdat,
                        unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
    /* Is any node_reclaim_mode bit set? */
    return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);
void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
        unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
        sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
    return global_node_page_state(NR_SWAPCACHE);
}

extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
    return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
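
/*
 * Worked example: with total_swap_pages = 1000 and 400 slots still
 * free, 400 * 2 = 800 < 1000, so vm_swap_full() returns true and
 * reclaim starts dropping swapcache copies more aggressively.
 */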

static inline long get_nr_swap_pages(void)
{
    return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
    percpu_ref_put(&si->users);
}
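
/*
 * Usage sketch (assumption, mirroring the pattern documented in
 * mm/swapfile.c): get_swap_device() takes a percpu_ref on the device so
 * it cannot be swapped off underneath the caller; every successful get
 * must be paired with put_swap_device(). "entry" is a hypothetical
 * swp_entry_t the caller already holds.
 */
#if 0 /* example only */
    struct swap_info_struct *si = get_swap_device(entry);

    if (si) {
        /* ... safely dereference si, e.g. si->swap_map ... */
        put_swap_device(si);
    }
#endif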

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
    return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
    return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()         0L
#define total_swap_pages            0L
#define total_swapcache_pages()         0UL
#define vm_swap_full()              0

#define si_swapinfo(val) \
    do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
    put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
    release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline void free_swap_cache(struct page *page)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
    return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
    return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
    return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
    return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
    return 0;
}

static inline int try_to_free_swap(struct page *page)
{
    return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
    swp_entry_t entry;
    entry.val = 0;
    return entry;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
                  unsigned long start_page,
                  unsigned long nr_pages, sector_t start_block)
{
    return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
    return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
    /* Cgroup2 doesn't have per-cgroup swappiness */
    if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
        return vm_swappiness;

    /* root ? */
    if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
        return vm_swappiness;

    return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
    return vm_swappiness;
}
#endif

#ifdef CONFIG_ZSWAP
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
    if (mem_cgroup_disabled())
        return;
    __cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
    cgroup_throttle_swaprate(&folio->page, gfp);
}

#ifdef CONFIG_MEMCG_SWAP
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
        swp_entry_t entry)
{
    if (mem_cgroup_disabled())
        return 0;
    return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
    if (mem_cgroup_disabled())
        return;
    __mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
                         swp_entry_t entry)
{
    return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                        unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
    return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
    return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */