/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/mmu_context.h>
#include <linux/export.h>

#include <asm/mmu_context.h>

/*
 * use_mm
 *  Makes the calling kernel thread take on the specified
 *  mm context.
 *  (Note: this routine is intended to be called only
 *  from a kernel thread context)
 */
void use_mm(struct mm_struct *mm)
{
    struct mm_struct *active_mm;
    struct task_struct *tsk = current;

    task_lock(tsk);
    active_mm = tsk->active_mm;
    if (active_mm != mm) {
        /* becoming the new active_mm: take an mm_count reference */
        atomic_inc(&mm->mm_count);
        tsk->active_mm = mm;
    }
    tsk->mm = mm;
    switch_mm(active_mm, mm, tsk);  /* load the new address space on this CPU */
    task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
    finish_arch_post_lock_switch();
#endif

    if (active_mm != mm)
        mmdrop(active_mm);  /* drop the reference held on the old active_mm */
}
EXPORT_SYMBOL_GPL(use_mm);
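
/*
 * Illustrative sketch, not part of the original file: one way a kernel
 * thread might use use_mm() to access a process's user memory, in the
 * style of callers such as vhost.  The function name, the "uaddr"
 * argument, and the assumption that the caller already holds an
 * mm_users reference on "mm" are hypothetical, for illustration only;
 * put_user() comes from <linux/uaccess.h>.
 */
static int example_poke_user(struct mm_struct *mm, int __user *uaddr, int val)
{
    int ret;

    use_mm(mm);                     /* adopt the user address space */
    ret = put_user(val, uaddr);     /* user accesses now resolve in mm */
    unuse_mm(mm);                   /* detach again */

    return ret;
}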

/*
 * unuse_mm
 *  Reverses the effect of use_mm, i.e. releases the
 *  specified mm context which was earlier taken on
 *  by the calling kernel thread.
 *  (Note: this routine is intended to be called only
 *  from a kernel thread context)
 */
void unuse_mm(struct mm_struct *mm)
{
    struct task_struct *tsk = current;

    task_lock(tsk);
    sync_mm_rss(mm);    /* fold this thread's cached RSS counters into mm */
    tsk->mm = NULL;
    /* active_mm is still 'mm' */
    enter_lazy_tlb(mm, tsk);
    task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(unuse_mm);
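
/*
 * Illustrative sketch, not part of the original file: the usual
 * lifecycle around use_mm()/unuse_mm().  The owning context pins the
 * target's mm with get_task_mm() (which takes an mm_users reference)
 * before a worker adopts it, and drops that reference with mmput()
 * when done.  "target" and the worker body are hypothetical; the
 * helpers used here are declared in the core mm/sched headers this
 * file already includes.
 */
static void example_worker(struct task_struct *target)
{
    struct mm_struct *mm = get_task_mm(target);

    if (!mm)
        return;         /* target has no user address space */

    use_mm(mm);
    /* ... user-space accesses on behalf of "target" go here ... */
    unuse_mm(mm);

    mmput(mm);          /* release the get_task_mm() reference */
}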