/*
 * (Extraction note: LXR code-browser navigation chrome removed;
 *  original in-page line numbers are viewer artifacts.)
 */

0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * NUMA memory policies for Linux.
0004  * Copyright 2003,2004 Andi Kleen SuSE Labs
0005  */
0006 #ifndef _LINUX_MEMPOLICY_H
0007 #define _LINUX_MEMPOLICY_H 1
0008 
0009 #include <linux/sched.h>
0010 #include <linux/mmzone.h>
0011 #include <linux/slab.h>
0012 #include <linux/rbtree.h>
0013 #include <linux/spinlock.h>
0014 #include <linux/nodemask.h>
0015 #include <linux/pagemap.h>
0016 #include <uapi/linux/mempolicy.h>
0017 
0018 struct mm_struct;
0019 
0020 #ifdef CONFIG_NUMA
0021 
0022 /*
0023  * Describe a memory policy.
0024  *
0025  * A mempolicy can be either associated with a process or with a VMA.
0026  * For VMA related allocations the VMA policy is preferred, otherwise
0027  * the process policy is used. Interrupts ignore the memory policy
0028  * of the current process.
0029  *
0030  * Locking policy for interleave:
0031  * In process context there is no locking because only the process accesses
0032  * its own state. All vma manipulation is somewhat protected by a down_read on
0033  * mmap_lock.
0034  *
0035  * Freeing policy:
0036  * Mempolicy objects are reference counted.  A mempolicy will be freed when
0037  * mpol_put() decrements the reference count to zero.
0038  *
0039  * Duplicating policy objects:
0040  * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
0041  * to the new storage.  The reference count of the new object is initialized
0042  * to 1, representing the caller of mpol_dup().
0043  */
struct mempolicy {
    atomic_t refcnt;        /* reference count; freed via __mpol_put() when it hits zero */
    unsigned short mode;    /* See MPOL_* above */
    unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
    nodemask_t nodes;   /* interleave/bind/prefer */
    int home_node;      /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */

    union {
        nodemask_t cpuset_mems_allowed; /* relative to these nodes */
        nodemask_t user_nodemask;   /* nodemask passed by user */
    } w;
};
0056 
0057 /*
0058  * Support for managing mempolicy data objects (clone, copy, destroy)
0059  * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
0060  */
0061 
0062 extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
    /* Drop one reference; NULL (the default policy) is a no-op. */
    if (!pol)
        return;
    __mpol_put(pol);
}
0068 
0069 /*
0070  * Does mempolicy pol need explicit unref after use?
0071  * Currently only needed for shared policies.
0072  */
0073 static inline int mpol_needs_cond_ref(struct mempolicy *pol)
0074 {
0075     return (pol && (pol->flags & MPOL_F_SHARED));
0076 }
0077 
0078 static inline void mpol_cond_put(struct mempolicy *pol)
0079 {
0080     if (mpol_needs_cond_ref(pol))
0081         __mpol_put(pol);
0082 }
0083 
0084 extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
0085 static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
0086 {
0087     if (pol)
0088         pol = __mpol_dup(pol);
0089     return pol;
0090 }
0091 
/* The policy attached to a VMA, if any; NULL falls back to the task policy. */
#define vma_policy(vma) ((vma)->vm_policy)
0093 
0094 static inline void mpol_get(struct mempolicy *pol)
0095 {
0096     if (pol)
0097         atomic_inc(&pol->refcnt);
0098 }
0099 
0100 extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
0101 static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
0102 {
0103     if (a == b)
0104         return true;
0105     return __mpol_equal(a, b);
0106 }
0107 
0108 /*
0109  * Tree of shared policies for a shared memory region.
0110  * Maintain the policies in a pseudo mm that contains vmas. The vmas
0111  * carry the policy. As a special twist the pseudo mm is indexed in pages, not
0112  * bytes, so that we can work with shared memory segments bigger than
0113  * unsigned long.
0114  */
0115 
struct sp_node {
    struct rb_node nd;          /* linkage in shared_policy.root */
    unsigned long start, end;   /* range covered, in pages (see comment above) */
    struct mempolicy *policy;   /* policy applied to this range */
};
0121 
struct shared_policy {
    struct rb_root root;    /* rbtree of sp_node ranges */
    rwlock_t lock;      /* guards root; presumably read-mostly lookups — see mm/mempolicy.c */
};
0126 
/* Shared-policy management (policies attached to shared memory regions). */
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                struct vm_area_struct *vma,
                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                        unsigned long idx);

/* Policy lookup: task-level and VMA-level. */
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
        unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
0140 
/* Boot-time setup and cpuset-driven rebinding of policies. */
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

/* Allocation-side helpers: derive node/nodemask choices from a policy. */
extern int huge_node(struct vm_area_struct *vma,
                unsigned long addr, gfp_t gfp_flags,
                struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
                const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
0153 
0154 static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
0155 {
0156     struct mempolicy *mpol = get_task_policy(current);
0157 
0158     return policy_nodemask(gfp, mpol);
0159 }
0160 
/* Node selection for slab allocations under the current policy. */
extern unsigned int mempolicy_slab_node(void);

/* Highest zone any policy applies to (maintained by check_highest_zone()). */
extern enum zone_type policy_zone;
0164 
0165 static inline void check_highest_zone(enum zone_type k)
0166 {
0167     if (k > policy_zone && k != ZONE_MOVABLE)
0168         policy_zone = k;
0169 }
0170 
/* Migrate pages of @mm from the @from nodes to the @to nodes. */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
             const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
/* Parse a textual policy description (used by tmpfs) into a mempolicy. */
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

/* Format @pol into @buffer, writing at most @maxlen bytes. */
extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

/* Preferred node for a page per policy (-1 = no preference; see !NUMA stub). */
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
0186 
/* True if @pol uses MPOL_PREFERRED_MANY. @pol must be non-NULL (no check). */
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
    return  (pol->mode == MPOL_PREFERRED_MANY);
}
0191 
0192 
0193 #else
0194 
/* With !CONFIG_NUMA a policy carries no state; empty type keeps the API. */
struct mempolicy {};
0196 
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
    /* Only one (empty) policy exists without NUMA, so all compare equal. */
    return true;
}
0201 
static inline void mpol_put(struct mempolicy *p)
{
    /* No refcounting without NUMA. */
}
0205 
static inline void mpol_cond_put(struct mempolicy *pol)
{
    /* No shared-policy references to drop without NUMA. */
}
0209 
static inline void mpol_get(struct mempolicy *pol)
{
    /* No refcounting without NUMA. */
}
0213 
/* Empty stand-in type for the !NUMA build. */
struct shared_policy {};
0215 
static inline void mpol_shared_policy_init(struct shared_policy *sp,
                        struct mempolicy *mpol)
{
    /* Nothing to initialize in the empty stub. */
}
0220 
static inline void mpol_free_shared_policy(struct shared_policy *p)
{
    /* Nothing allocated, nothing to free. */
}
0224 
static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
    /* No per-range policies without NUMA. */
    return NULL;
}
0230 
/* VMAs carry no policy without NUMA. */
#define vma_policy(vma) NULL
0232 
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
    /* Nothing to copy; report success. */
    return 0;
}
0238 
/* No policy machinery to set up without NUMA. */
static inline void numa_policy_init(void)
{
}
0242 
/* No policy state to reset without NUMA. */
static inline void numa_default_policy(void)
{
}
0246 
static inline void mpol_rebind_task(struct task_struct *tsk,
                const nodemask_t *new)
{
    /* No task policy to rebind without NUMA. */
}
0251 
static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
    /* No VMA policies to rebind without NUMA. */
}
0255 
static inline int huge_node(struct vm_area_struct *vma,
                unsigned long addr, gfp_t gfp_flags,
                struct mempolicy **mpol, nodemask_t **nodemask)
{
    /* No policy or nodemask to hand back; node 0 is the only choice. */
    *mpol = NULL;
    *nodemask = NULL;
    return 0;
}
0264 
static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
    /* No policy nodemask exists without NUMA. */
    return false;
}
0269 
static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                   const nodemask_t *to, int flags)
{
    /* Single-node system: nothing to migrate; report success. */
    return 0;
}
0275 
/* NOTE(review): NUMA variant takes enum zone_type — confirm int is intended. */
static inline void check_highest_zone(int k)
{
}
0279 
0280 #ifdef CONFIG_TMPFS
/* Policy strings cannot be parsed without NUMA; always fail. */
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
    return 1;   /* error */
}
0285 #endif
0286 
/* Without NUMA, a page is never on the "wrong" node. */
static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
                 unsigned long address)
{
    return -1; /* no node preference */
}
0292 
static inline void mpol_put_task_policy(struct task_struct *task)
{
    /* Tasks hold no policy without NUMA. */
}
0296 
static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
    /* No policy means no nodemask restriction. */
    return NULL;
}
0301 
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
    /* MPOL_PREFERRED_MANY cannot be set without NUMA. */
    return  false;
}
0306 
0307 #endif /* CONFIG_NUMA */
0308 #endif