/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/ioasid.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See the kernel documentation on active_mm for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
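
/*
 * Illustrative sketch (not part of the upstream header): a deferred worker
 * can use mmgrab()/mmdrop() to keep the mm_struct itself alive, and
 * mmget_not_zero()/mmput() to check whether the address space is still
 * usable. "struct defer_ctx" and "defer_worker" are hypothetical names used
 * only for this example; the submitter is assumed to have called mmgrab().
 *
 *	struct defer_ctx {
 *		struct work_struct work;
 *		struct mm_struct *mm;	// pinned with mmgrab() by the submitter
 *	};
 *
 *	static void defer_worker(struct work_struct *work)
 *	{
 *		struct defer_ctx *ctx = container_of(work, struct defer_ctx, work);
 *
 *		// mm_count is held, so ctx->mm cannot be freed; mm_users may
 *		// already have dropped to zero, so re-validate before use.
 *		if (mmget_not_zero(ctx->mm)) {
 *			// ... operate on the address space ...
 *			mmput(ctx->mm);
 *		}
 *		mmdrop(ctx->mm);	// pairs with the submitter's mmgrab()
 *	}
 */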

#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly an RCU use case, but
 * call_rcu() is by far the least expensive way to defer the free.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See the kernel documentation on active_mm for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput(), but performs the slow path from async context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
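
/*
 * Illustrative sketch (not part of the upstream header): taking a temporary
 * mm_users reference on another task's address space with get_task_mm() and
 * releasing it with mmput(). "example_count_vmas" is a hypothetical helper
 * used only for this example.
 *
 *	static int example_count_vmas(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *		int nr;
 *
 *		if (!mm)
 *			return -ESRCH;	// task has no mm, or it is exiting
 *
 *		mmap_read_lock(mm);
 *		nr = mm->map_count;
 *		mmap_read_unlock(mm);
 *
 *		mmput(mm);		// drop the reference from get_task_mm()
 *		return nr;
 *	}
 */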

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base)	(base)
#endif
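
/*
 * Illustrative sketch (not part of the upstream header): an architecture can
 * supply its own mapping limits by defining these macros in its own headers,
 * in which case the #ifndef defaults above are skipped. The names below
 * (EXAMPLE_ARCH_TASK_SIZE, example_arch_mmap_offset) are hypothetical.
 *
 *	// in a hypothetical architecture header included before this file
 *	#define arch_get_mmap_end(addr, len, flags)	(EXAMPLE_ARCH_TASK_SIZE)
 *	#define arch_get_mmap_base(addr, base)	((base) + example_arch_mmap_offset())
 */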

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check: it can be a false negative. But we do not care; if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is the weaker
		 * context, so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
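
/*
 * Illustrative sketch (not part of the upstream header): the effect of
 * current_gfp_context() inside a GFP_NOFS scope. Under memalloc_nofs_save()
 * (defined further below), a GFP_KERNEL request is degraded to GFP_NOFS
 * before it reaches the page allocator.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	gfp_t effective = current_gfp_context(GFP_KERNEL);
 *	// effective == GFP_KERNEL & ~__GFP_FS, i.e. GFP_NOFS
 *	memalloc_nofs_restore(nofs_flags);
 */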

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/*
 * Any memory-allocation retry loop should use memalloc_retry_wait(), and
 * pass the flags for the most constrained allocation attempt that might
 * have failed. This provides useful documentation of where loops are, and
 * a central place to fine tune the waiting as the MM implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/*
	 * We use io_schedule_timeout() because waiting for memory typically
	 * involves waiting for dirty pages to be written out, which requires
	 * IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/*
		 * Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait
		 */
		io_schedule_timeout(HZ/50);
}
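
/*
 * Illustrative sketch (not part of the upstream header): a retry loop around
 * an allocation that must eventually succeed, assuming a sleepable process
 * context. "example_alloc_buffer" is a hypothetical helper used only for
 * this example.
 *
 *	static void *example_alloc_buffer(size_t size, gfp_t gfp)
 *	{
 *		void *p;
 *
 *		while (!(p = kmalloc(size, gfp)))
 *			memalloc_retry_wait(gfp);	// centralized, documented backoff
 *		return p;
 *	}
 */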

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. Compiles to nothing
 * without CONFIG_LOCKDEP. Includes a conditional might_sleep() if
 * @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
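
/*
 * Illustrative sketch (not part of the upstream header): annotating a
 * function that only allocates on its slow path, so lockdep still sees the
 * potential reclaim dependency on every call. "example_get_slot",
 * "struct cache", "struct slot" and "fetch_free_slot" are hypothetical.
 *
 *	static struct slot *example_get_slot(struct cache *c, gfp_t gfp)
 *	{
 *		might_alloc(gfp);	// an allocation may or may not happen below
 *
 *		if (c->nr_free)
 *			return fetch_free_slot(c);
 *		return kmalloc(sizeof(struct slot), gfp);
 *	}
 */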

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that the given flags is the return value from the
 * pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
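
/*
 * Illustrative sketch (not part of the upstream header): wrapping a section
 * that must not recurse into the block layer, e.g. memory allocated while a
 * block driver is servicing I/O. "struct request_ctx",
 * "example_prepare_request" and "example_setup_buffers" are hypothetical.
 *
 *	static int example_prepare_request(struct request_ctx *ctx)
 *	{
 *		unsigned int noio_flags;
 *		int ret;
 *
 *		noio_flags = memalloc_noio_save();
 *		// Every allocation in here behaves as if GFP_NOIO was used,
 *		// even deep inside library code that passes GFP_KERNEL.
 *		ret = example_setup_buffers(ctx);
 *		memalloc_noio_restore(noio_flags);
 *
 *		return ret;
 *	}
 */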

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that the given flags is the return value from the
 * pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
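
/*
 * Illustrative sketch (not part of the upstream header): a filesystem that
 * holds a lock also taken during reclaim writeback can force GFP_NOFS
 * behaviour for the whole critical section instead of threading GFP_NOFS
 * through every callee. "example_fs_rwsem" and "example_update_metadata"
 * are hypothetical.
 *
 *	unsigned int nofs_flags;
 *	int error;
 *
 *	down_write(&example_fs_rwsem);
 *	nofs_flags = memalloc_nofs_save();
 *	// Allocations here implicitly drop __GFP_FS and cannot recurse back
 *	// into the filesystem via direct reclaim.
 *	error = example_update_metadata();
 *	memalloc_nofs_restore(nofs_flags);
 *	up_write(&example_fs_rwsem);
 */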

/*
 * Set PF_MEMALLOC: the task is treated as a memory reclaimer, so subsequent
 * allocations may dip into memory reserves and will not recurse into direct
 * reclaim. Use sparingly and pair with memalloc_noreclaim_restore().
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

/*
 * Set PF_MEMALLOC_PIN: current_gfp_context() strips __GFP_MOVABLE from
 * subsequent allocations, keeping pages that are about to be long-term
 * pinned out of ZONE_MOVABLE. Pair with memalloc_pin_restore().
 */
static inline unsigned int memalloc_pin_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_PIN;

	current->flags |= PF_MEMALLOC_PIN;
	return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope.
 * All the __GFP_ACCOUNT allocations till the end of the scope will be
 * charged to the given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * restore the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
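
/*
 * Illustrative sketch (not part of the upstream header): charging a
 * __GFP_ACCOUNT allocation to a memcg other than the current task's, saving
 * and restoring the previous scope because set_active_memcg() can nest.
 * "target_memcg" is a hypothetical variable.
 *
 *	struct mem_cgroup *old_memcg;
 *	void *obj;
 *
 *	old_memcg = set_active_memcg(target_memcg);
 *	obj = kmalloc(64, GFP_KERNEL | __GFP_ACCOUNT);	// charged to target_memcg
 *	set_active_memcg(old_memcg);
 */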

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif /* CONFIG_MEMBARRIER */

#ifdef CONFIG_IOMMU_SVA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	mm->pasid = INVALID_IOASID;
}

/* Associate a PASID with an mm_struct */
static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid)
{
	mm->pasid = pasid;
}

static inline void mm_pasid_drop(struct mm_struct *mm)
{
	if (pasid_valid(mm->pasid)) {
		ioasid_free(mm->pasid);
		mm->pasid = INVALID_IOASID;
	}
}
#else
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) {}
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif

#endif /* _LINUX_SCHED_MM_H */