// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

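/* Index into a TSB: the TSB is a direct-mapped hash table, so the
 * entry for a virtual address is simply (vaddr >> hash_shift) masked
 * by the table size (nentries is always a power of two).
 */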
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
    vaddr >>= hash_shift;
    return vaddr & (nentries - 1);
}

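/* A TSB tag holds bits 63:22 of the virtual address; an entry matches
 * when its stored tag equals vaddr >> 22.
 */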
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
    return (tag == (vaddr >> 22));
}

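/* Invalidate kernel TSB entries by scanning the whole table.  Each
 * entry's virtual address is reconstructed from its index (the low
 * hash bits, starting at the 8K page shift) and its stored tag
 * (vaddr >> 22); entries that fall inside [start, end) are marked
 * invalid.
 */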
static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
{
    unsigned long idx;

    for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
        struct tsb *ent = &swapper_tsb[idx];
        unsigned long match = idx << 13;

        match |= (ent->tag << 22);
        if (match >= start && match < end)
            ent->tag = (1UL << TSB_TAG_INVALID_BIT);
    }
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
    unsigned long v;

    if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
        return flush_tsb_kernel_range_scan(start, end);

    for (v = start; v < end; v += PAGE_SIZE) {
        unsigned long hash = tsb_hash(v, PAGE_SHIFT,
                      KERNEL_TSB_NENTRIES);
        struct tsb *ent = &swapper_tsb[hash];

        if (tag_compare(ent->tag, v))
            ent->tag = (1UL << TSB_TAG_INVALID_BIT);
    }
}

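/* Invalidate the single TSB entry that would map address 'v'.  'tsb'
 * is the TSB base (a physical address on cheetah_plus and hypervisor,
 * a kernel virtual address otherwise); tsb_flush() clears the entry
 * whose tag matches vaddr >> 22.
 */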
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
                  unsigned long hash_shift,
                  unsigned long nentries)
{
    unsigned long tag, ent, hash;

    v &= ~0x1UL;
    hash = tsb_hash(v, hash_shift, nentries);
    ent = tsb + (hash * sizeof(struct tsb));
    tag = (v >> 22UL);

    tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
                unsigned long tsb, unsigned long nentries)
{
    unsigned long i;

    for (i = 0; i < tb->tlb_nr; i++)
        __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
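/* A huge page spans multiple entries when hashed at a smaller page
 * shift, so invalidate every sub-entry covering the huge mapping:
 * 1 << (hugepage_shift - hash_shift) entries, one per hash_shift-sized
 * chunk of the huge page.
 */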
static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
                       unsigned long hash_shift,
                       unsigned long nentries,
                       unsigned int hugepage_shift)
{
    unsigned int hpage_entries;
    unsigned int i;

    hpage_entries = 1 << (hugepage_shift - hash_shift);
    for (i = 0; i < hpage_entries; i++)
        __flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
                      nentries);
}

static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
                 unsigned long tsb, unsigned long nentries,
                 unsigned int hugepage_shift)
{
    unsigned long i;

    for (i = 0; i < tb->tlb_nr; i++)
        __flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
                       nentries, hugepage_shift);
}
#endif

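/* Flush the TSB entries for every address in a TLB flush batch.  The
 * batch's hugepage_shift selects which TSB to touch: shifts below
 * REAL_HPAGE_SHIFT hit the base (8K) TSB, larger ones hit the huge
 * page TSB if one has been allocated.
 */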
void flush_tsb_user(struct tlb_batch *tb)
{
    struct mm_struct *mm = tb->mm;
    unsigned long nentries, base, flags;

    spin_lock_irqsave(&mm->context.lock, flags);

    if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
        base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
        nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
            base = __pa(base);
        if (tb->hugepage_shift == PAGE_SHIFT)
            __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#if defined(CONFIG_HUGETLB_PAGE)
        else
            __flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
                         tb->hugepage_shift);
#endif
    }
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
    else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
        base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
        nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
            base = __pa(base);
        __flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
                     tb->hugepage_shift);
    }
#endif
    spin_unlock_irqrestore(&mm->context.lock, flags);
}

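/* Flush the TSB entries for a single user virtual address; the TSB
 * selection logic mirrors flush_tsb_user() above.
 */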
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
             unsigned int hugepage_shift)
{
    unsigned long nentries, base, flags;

    spin_lock_irqsave(&mm->context.lock, flags);

    if (hugepage_shift < REAL_HPAGE_SHIFT) {
        base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
        nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
            base = __pa(base);
        if (hugepage_shift == PAGE_SHIFT)
            __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
                          nentries);
#if defined(CONFIG_HUGETLB_PAGE)
        else
            __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
                           nentries, hugepage_shift);
#endif
    }
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
    else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
        base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
        nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
            base = __pa(base);
        __flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
                       nentries, hugepage_shift);
    }
#endif
    spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE    HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE   HV_PGSZ_MASK_8K

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE    HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE   HV_PGSZ_MASK_4MB
#endif

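/* Fill in the tsb_config for one TSB: record the entry count, encode
 * the TSB size index in the low bits of tsb_reg_val, and either store
 * the physical base directly (cheetah_plus/hypervisor) or set up the
 * virtual mapping address and locked-TTE value used by older chips.
 * On sun4v the matching hypervisor TSB descriptor is also filled in.
 */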
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
    unsigned long tsb_reg, base, tsb_paddr;
    unsigned long page_sz, tte;

    mm->context.tsb_block[tsb_idx].tsb_nentries =
        tsb_bytes / sizeof(struct tsb);

    switch (tsb_idx) {
    case MM_TSB_BASE:
        base = TSBMAP_8K_BASE;
        break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
    case MM_TSB_HUGE:
        base = TSBMAP_4M_BASE;
        break;
#endif
    default:
        BUG();
    }

    tte = pgprot_val(PAGE_KERNEL_LOCKED);
    tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
    BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

    /* Use the smallest page size that can map the whole TSB
     * in one TLB entry.
     */
    switch (tsb_bytes) {
    case 8192 << 0:
        tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
        base += (tsb_paddr & 8192);
#endif
        page_sz = 8192;
        break;

    case 8192 << 1:
        tsb_reg = 0x1UL;
        page_sz = 64 * 1024;
        break;

    case 8192 << 2:
        tsb_reg = 0x2UL;
        page_sz = 64 * 1024;
        break;

    case 8192 << 3:
        tsb_reg = 0x3UL;
        page_sz = 64 * 1024;
        break;

    case 8192 << 4:
        tsb_reg = 0x4UL;
        page_sz = 512 * 1024;
        break;

    case 8192 << 5:
        tsb_reg = 0x5UL;
        page_sz = 512 * 1024;
        break;

    case 8192 << 6:
        tsb_reg = 0x6UL;
        page_sz = 512 * 1024;
        break;

    case 8192 << 7:
        tsb_reg = 0x7UL;
        page_sz = 4 * 1024 * 1024;
        break;

    default:
        printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
               current->comm, current->pid, tsb_bytes);
        BUG();
    }
    tte |= pte_sz_bits(page_sz);

    if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
        /* Physical mapping, no locked TLB entry for TSB.  */
        tsb_reg |= tsb_paddr;

        mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
        mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
        mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
    } else {
        tsb_reg |= base;
        tsb_reg |= (tsb_paddr & (page_sz - 1UL));
        tte |= (tsb_paddr & ~(page_sz - 1UL));

        mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
        mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
        mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
    }

    /* Setup the Hypervisor TSB descriptor.  */
    if (tlb_type == hypervisor) {
        struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

        switch (tsb_idx) {
        case MM_TSB_BASE:
            hp->pgsz_idx = HV_PGSZ_IDX_BASE;
            break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        case MM_TSB_HUGE:
            hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
            break;
#endif
        default:
            BUG();
        }
        hp->assoc = 1;
        hp->num_ttes = tsb_bytes / 16;
        hp->ctx_idx = 0;
        switch (tsb_idx) {
        case MM_TSB_BASE:
            hp->pgsz_mask = HV_PGSZ_MASK_BASE;
            break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        case MM_TSB_HUGE:
            hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
            break;
#endif
        default:
            BUG();
        }
        hp->tsb_base = tsb_paddr;
        hp->resv = 0;
    }
}

struct kmem_cache *pgtable_cache __read_mostly;

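/* One slab cache per supported TSB size (8KB through 1MB, in powers of
 * two).  Each cache hands out size-aligned, physically contiguous
 * blocks, which is what the hardware requires of a TSB.
 */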
static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
    "tsb_8KB",
    "tsb_16KB",
    "tsb_32KB",
    "tsb_64KB",
    "tsb_128KB",
    "tsb_256KB",
    "tsb_512KB",
    "tsb_1MB",
};

void __init pgtable_cache_init(void)
{
    unsigned long i;

    pgtable_cache = kmem_cache_create("pgtable_cache",
                      PAGE_SIZE, PAGE_SIZE,
                      0,
                      _clear_page);
    if (!pgtable_cache) {
        prom_printf("pgtable_cache_init(): Could not create!\n");
        prom_halt();
    }

    for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
        unsigned long size = 8192 << i;
        const char *name = tsb_cache_names[i];

        tsb_caches[i] = kmem_cache_create(name,
                          size, size,
                          0, NULL);
        if (!tsb_caches[i]) {
            prom_printf("Could not create %s cache\n", name);
            prom_halt();
        }
    }
}

int sysctl_tsb_ratio = -2;

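/* Convert a TSB size in bytes into the RSS threshold at which it is
 * considered full enough to grow.  With the default sysctl_tsb_ratio
 * of -2 the limit is num_ents - num_ents/4, i.e. 3/4 of capacity: an
 * 8K TSB holds 512 entries, so it is grown once RSS reaches 384 pages.
 */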
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
    unsigned long num_ents = (new_size / sizeof(struct tsb));

    if (sysctl_tsb_ratio < 0)
        return num_ents - (num_ents >> -sysctl_tsb_ratio);
    else
        return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
    unsigned long max_tsb_size = 1 * 1024 * 1024;
    unsigned long new_size, old_size, flags;
    struct tsb *old_tsb, *new_tsb;
    unsigned long new_cache_index, old_cache_index;
    unsigned long new_rss_limit;
    gfp_t gfp_flags;

    if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
        max_tsb_size = (PAGE_SIZE << MAX_ORDER);

    new_cache_index = 0;
    for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
        new_rss_limit = tsb_size_to_rss_limit(new_size);
        if (new_rss_limit > rss)
            break;
        new_cache_index++;
    }

    if (new_size == max_tsb_size)
        new_rss_limit = ~0UL;

retry_tsb_alloc:
    gfp_flags = GFP_KERNEL;
    if (new_size > (PAGE_SIZE * 2))
        gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

    new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
                    gfp_flags, numa_node_id());
    if (unlikely(!new_tsb)) {
        /* Not being able to fork due to a high-order TSB
         * allocation failure is very bad behavior.  Just back
         * down to a 0-order allocation and force no TSB
         * growing for this address space.
         */
        if (mm->context.tsb_block[tsb_index].tsb == NULL &&
            new_cache_index > 0) {
            new_cache_index = 0;
            new_size = 8192;
            new_rss_limit = ~0UL;
            goto retry_tsb_alloc;
        }

        /* If we failed on a TSB grow, we are under serious
         * memory pressure so don't try to grow any more.
         */
        if (mm->context.tsb_block[tsb_index].tsb != NULL)
            mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
        return;
    }

    /* Mark all tags as invalid.  */
    tsb_init(new_tsb, new_size);

    /* Ok, we are about to commit the changes.  If we are
     * growing an existing TSB the locking is very tricky,
     * so WATCH OUT!
     *
     * We have to hold mm->context.lock while committing to the
     * new TSB, this synchronizes us with processors in
     * flush_tsb_user() and switch_mm() for this address space.
     *
     * But even with that lock held, processors run asynchronously
     * accessing the old TSB via TLB miss handling.  This is OK
     * because those actions are just propagating state from the
     * Linux page tables into the TSB, page table mappings are not
     * being changed.  If a real fault occurs, the processor will
     * synchronize with us when it hits flush_tsb_user(), this is
     * also true for the case where vmscan is modifying the page
     * tables.  The only thing we need to be careful with is to
     * skip any locked TSB entries during copy_tsb().
     *
     * When we finish committing to the new TSB, we have to drop
     * the lock and ask all other cpus running this address space
     * to run tsb_context_switch() to see the new TSB table.
     */
    spin_lock_irqsave(&mm->context.lock, flags);

    old_tsb = mm->context.tsb_block[tsb_index].tsb;
    old_cache_index =
        (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
    old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
            sizeof(struct tsb));

    /* Handle multiple threads trying to grow the TSB at the same time.
     * One will get in here first, and bump the size and the RSS limit.
     * The others will get in here next and hit this check.
     */
    if (unlikely(old_tsb &&
             (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
        spin_unlock_irqrestore(&mm->context.lock, flags);

        kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
        return;
    }

    mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

    if (old_tsb) {
        extern void copy_tsb(unsigned long old_tsb_base,
                     unsigned long old_tsb_size,
                     unsigned long new_tsb_base,
                     unsigned long new_tsb_size,
                     unsigned long page_size_shift);
        unsigned long old_tsb_base = (unsigned long) old_tsb;
        unsigned long new_tsb_base = (unsigned long) new_tsb;

        if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
            old_tsb_base = __pa(old_tsb_base);
            new_tsb_base = __pa(new_tsb_base);
        }
        copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
            tsb_index == MM_TSB_BASE ?
            PAGE_SHIFT : REAL_HPAGE_SHIFT);
    }

    mm->context.tsb_block[tsb_index].tsb = new_tsb;
    setup_tsb_params(mm, tsb_index, new_size);

    spin_unlock_irqrestore(&mm->context.lock, flags);

    /* If old_tsb is NULL, we're being invoked for the first time
     * from init_new_context().
     */
    if (old_tsb) {
        /* Reload it on the local cpu.  */
        tsb_context_switch(mm);

        /* Now force other processors to do the same.  */
        preempt_disable();
        smp_tsb_sync(mm);
        preempt_enable();

        /* Now it is safe to free the old tsb.  */
        kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
    }
}

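/* Set up the MMU context for a freshly created address space: init the
 * locks, clear the context value and ADI tag store, zero the TSB
 * pointers inherited from the parent by copy_mm(), and allocate the
 * initial TSB(s) sized for the inherited RSS.
 */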
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    unsigned long mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
    unsigned long saved_hugetlb_pte_count;
    unsigned long saved_thp_pte_count;
#endif
    unsigned int i;

    spin_lock_init(&mm->context.lock);

    mm->context.sparc64_ctx_val = 0UL;

    mm->context.tag_store = NULL;
    spin_lock_init(&mm->context.tag_lock);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
    /* We reset them to zero because the fork() page copying
     * will re-increment the counters as the parent PTEs are
     * copied into the child address space.
     */
    saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
    saved_thp_pte_count = mm->context.thp_pte_count;
    mm->context.hugetlb_pte_count = 0;
    mm->context.thp_pte_count = 0;

    mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
#endif

    /* copy_mm() copies over the parent's mm_struct before calling
     * us, so we need to zero out the TSB pointer or else tsb_grow()
     * will be confused and think there is an older TSB to free up.
     */
    for (i = 0; i < MM_NUM_TSBS; i++)
        mm->context.tsb_block[i].tsb = NULL;

    /* If this is a fork, inherit the parent's TSB size.  We would
     * grow it to that size on the first page fault anyway.
     */
    tsb_grow(mm, MM_TSB_BASE, mm_rss);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
    if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
        tsb_grow(mm, MM_TSB_HUGE,
             (saved_hugetlb_pte_count + saved_thp_pte_count) *
             REAL_HPAGE_PER_HPAGE);
#endif

    if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
        return -ENOMEM;

    return 0;
}

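/* Free one TSB and clear its config.  The slab cache it came from is
 * recovered from the size index stored in the low 3 bits of
 * tsb_reg_val by setup_tsb_params().
 */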
static void tsb_destroy_one(struct tsb_config *tp)
{
    unsigned long cache_index;

    if (!tp->tsb)
        return;
    cache_index = tp->tsb_reg_val & 0x7UL;
    kmem_cache_free(tsb_caches[cache_index], tp->tsb);
    tp->tsb = NULL;
    tp->tsb_reg_val = 0UL;
}

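/* Tear down the MMU context: free all TSBs, release the context ID
 * back to the allocation bitmap, and free any ADI tag storage.
 */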
void destroy_context(struct mm_struct *mm)
{
    unsigned long flags, i;

    for (i = 0; i < MM_NUM_TSBS; i++)
        tsb_destroy_one(&mm->context.tsb_block[i]);

    spin_lock_irqsave(&ctx_alloc_lock, flags);

    if (CTX_VALID(mm->context)) {
        unsigned long nr = CTX_NRBITS(mm->context);
        mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
    }

    spin_unlock_irqrestore(&ctx_alloc_lock, flags);

    /* If ADI tag storage was allocated for this task, free it */
    if (mm->context.tag_store) {
        tag_storage_desc_t *tag_desc;
        unsigned long max_desc;
        unsigned char *tags;

        tag_desc = mm->context.tag_store;
        max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
        for (i = 0; i < max_desc; i++) {
            tags = tag_desc->tags;
            tag_desc->tags = NULL;
            kfree(tags);
            tag_desc++;
        }
        kfree(mm->context.tag_store);
        mm->context.tag_store = NULL;
    }
}