/*
 *  linux/arch/cris/arch-v10/mm/tlb.c
 *
 *  Low level TLB handling
 *
 *  Copyright (C) 2000-2007  Axis Communications AB
 *
 *  Authors:   Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>

/* Debug printout macro. It expands to nothing, so the D(printk(...)) calls
 * below are compiled out; define D(x) as x to enable them.
 */
#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which page_id we have assigned to which mm, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never used for a running context - it serves as an
 * invalid page_id, so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush and leave the TLB in an inconsistent state.
 */

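/* All of the flush routines below follow the same pattern: select an entry
 * with R_TLB_SELECT, write INVALID_PAGEID into R_TLB_HI and clear the valid
 * (and permission) bits in R_TLB_LO, so the entry can never hit again. The
 * IO_FIELD/IO_STATE/IO_EXTRACT macros (pulled in via arch/svinto.h) build up
 * and extract the individual register fields.
 */
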
/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
    int i;
    unsigned long flags;

    /* The vpn of i & 0xf is there so that we do not write identical TLB
     * entries into the same 4-way entry group.
     */

    local_irq_save(flags);
    for (i = 0; i < NUM_TLB_ENTRIES; i++) {
        *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf) );

        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
                      IO_STATE(R_TLB_LO, valid,  no) |
                      IO_STATE(R_TLB_LO, kernel, no) |
                      IO_STATE(R_TLB_LO, we,     no) |
                      IO_FIELD(R_TLB_LO, pfn,    0) );
    }
    local_irq_restore(flags);
    D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
    int i;
    int page_id = mm->context.page_id;
    unsigned long flags;

    D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

    if (page_id == NO_CONTEXT)
        return;

    /* Mark the TLB entries that match the page_id as invalid.
     * Here we could also check the _PAGE_GLOBAL bit and NOT flush
     * global pages. Is it worth the extra I/O?
     */

    local_irq_save(flags);
    for (i = 0; i < NUM_TLB_ENTRIES; i++) {
        *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
        if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
            *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
                          IO_FIELD(R_TLB_HI, vpn,     i & 0xf) );

            *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
                          IO_STATE(R_TLB_LO, valid,  no) |
                          IO_STATE(R_TLB_LO, kernel, no) |
                          IO_STATE(R_TLB_LO, we,     no) |
                          IO_FIELD(R_TLB_LO, pfn,    0) );
        }
    }
    local_irq_restore(flags);
}
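
/* flush_tlb_mm() above drops every entry tagged with the mm's page_id,
 * regardless of virtual address; flush_tlb_page() below only drops the
 * entries whose page_id and vpn both match.
 */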

/* invalidate a single page */

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
    struct mm_struct *mm = vma->vm_mm;
    int page_id = mm->context.page_id;
    int i;
    unsigned long flags;

    D(printk("tlb: flush page %lx in context %d (%p)\n", addr, page_id, mm));

    if (page_id == NO_CONTEXT)
        return;

    addr &= PAGE_MASK; /* perhaps not necessary */

    /* invalidate those TLB entries that match both the mm context
     * and the virtual address requested
     */

    local_irq_save(flags);
    for (i = 0; i < NUM_TLB_ENTRIES; i++) {
        unsigned long tlb_hi;

        *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
        tlb_hi = *R_TLB_HI;
        if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
            (tlb_hi & PAGE_MASK) == addr) {
            *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
                        addr; /* the same addr as before works fine here */

            *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
                          IO_STATE(R_TLB_LO, valid,  no) |
                          IO_STATE(R_TLB_LO, kernel, no) |
                          IO_STATE(R_TLB_LO, we,     no) |
                          IO_FIELD(R_TLB_LO, pfn,    0) );
        }
    }
    local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    mm->context.page_id = NO_CONTEXT;
    return 0;
}
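
/* The actual page_id is assigned later by get_mmu_context(), which
 * switch_mm() below calls to make sure the mm has a context before it runs.
 */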

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
    if (prev != next) {
        /* make sure we have a context */
        get_mmu_context(next);

        /* Remember the pgd for the fault handlers. This is similar to the
         * pgd register in some other CPUs. We need our own copy of it
         * because current and active_mm might be invalid at points where
         * we still need to dereference the pgd.
         */

        per_cpu(current_pgd, smp_processor_id()) = next->pgd;

        /* switch context in the MMU */

        D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
                 next->context.page_id, next));

        *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
                                  page_id, next->context.page_id);
    }
}