#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures for the Book3S hash MMU.
 */

#include <asm/page.h>
#include <asm/bug.h>
#include <asm/asm-const.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/book3s/64/slice.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		2
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_KEY_BIT4		ASM_CONST(0x2000000000000000)
#define HPTE_R_KEY_BIT3		ASM_CONST(0x1000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
#define HPTE_R_KEY_BIT2		ASM_CONST(0x0000000000000800)
#define HPTE_R_KEY_BIT1		ASM_CONST(0x0000000000000400)
#define HPTE_R_KEY_BIT0		ASM_CONST(0x0000000000000200)
#define HPTE_R_KEY		(HPTE_R_KEY_LO | HPTE_R_KEY_HI)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void (*hpte_invalidate)(unsigned long slot,
				unsigned long vpn,
				int bpsize, int apsize,
				int ssize, int local);
	long (*hpte_updatepp)(unsigned long slot,
			      unsigned long newpp,
			      unsigned long vpn,
			      int bpsize, int apsize,
			      int ssize, unsigned long flags);
	void (*hpte_updateboltedpp)(unsigned long newpp,
				    unsigned long ea,
				    int psize, int ssize);
	long (*hpte_insert)(unsigned long hpte_group,
			    unsigned long vpn,
			    unsigned long prpn,
			    unsigned long rflags,
			    unsigned long vflags,
			    int psize, int apsize,
			    int ssize);
	long (*hpte_remove)(unsigned long hpte_group);
	int (*hpte_removebolted)(unsigned long ea,
				 int psize, int ssize);
	void (*flush_hash_range)(unsigned long number, int local);
	void (*hugepage_invalidate)(unsigned long vsid,
				    unsigned long addr,
				    unsigned char *hpte_slot_array,
				    int psize, int ssize, int local);
	int (*resize_hpt)(unsigned long shift);
	/*
	 * Clear the entire hash table (e.g. at kexec time): special
	 * purpose, not used in normal fault handling.
	 */
	void (*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline unsigned int ap_to_shift(unsigned long ap)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		if (mmu_psize_defs[psize].ap == ap)
			return mmu_psize_defs[psize].shift;
	}

	return -1;
}

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}
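
/*
 * Illustrative sketch (not part of the original header): the value
 * returned by get_sllp_encoding() packs the segment's L||LP page-size
 * bits into the low bits, in the layout the TLB invalidation code
 * wants. Roughly, based on how the native hash code builds the RB
 * value for tlbie:
 *
 *	va |= get_sllp_encoding(apsize) << 5;
 *
 * Consult the native hash TLB code and the ISA for the authoritative
 * encoding.
 */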

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * encode page number shift.
 * In order to fit the 78 bit va in a 64 bit variable we shift the va
 * by 12 bits. This enables us to address up to a 76 bit va.
 * For hpt hash from a va we can ignore the page size bits of the va,
 * and for hpte encoding we ignore up to 23 bits of the va, so ignoring
 * the lower 12 bits works in all cases including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
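
/*
 * Illustrative sketch (not part of the original header): each
 * hpte_page_sizes[] byte packs two MMU_PAGE_* indices, the actual page
 * size in the high nibble and the base page size in the low nibble.
 * For a 64K HPTE on a 64K base page the table entry would hold
 * (MMU_PAGE_64K << 4) | MMU_PAGE_64K, so that, assuming v has
 * HPTE_V_LARGE set and r carries the matching LP bits:
 *
 *	hpte_page_size(v, r)      == 1ul << 16;	// actual page size
 *	hpte_base_page_size(v, r) == 1ul << 16;	// base page size
 */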

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the va.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * Conversion helpers between the ISA v3.0 HPTE format (B field in the
 * second dword) and the older format (B field in the first dword).
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st dword to 2nd, trim AVPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field back into the 1st dword */
	return (v & HPTE_V_COMMON_BITS) |
	       ((r & HPTE_R_3_0_SSIZE_MASK) <<
		(HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out the B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}

static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
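
/*
 * Illustrative sketch (not part of the original header): hpte_insert
 * back ends typically combine these encoders with caller-supplied flag
 * bits to build the two HPTE dwords, roughly:
 *
 *	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags |
 *		 HPTE_V_VALID;
 *	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
 *
 * with an hpte_old_to_new_{v,r}() conversion applied on ISA v3.0.
 */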

/*
 * Build a VPN_SHIFT-bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
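
/*
 * Illustrative sketch (not part of the original header): hash fault
 * handlers typically turn this hash into a PTE-group index with
 *
 *	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *
 * and, if the primary group is full, retry with the secondary hash
 * (~hash), tagging the entry with HPTE_V_SECONDARY.
 */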

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2
#define HPTE_USE_KERNEL_KEY	0x4

long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa,
			   unsigned long rflags, unsigned long vflags, int psize, int ssize);
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc);
int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hash__setup_new_exec(void);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

struct slb_entry {
	u64	esid;
	u64	vsid;
};

extern void slb_initialize(void);
void slb_flush_and_restore_bolted(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);

extern void slb_vmalloc_update(void);
void preload_new_slb_context(unsigned long start, unsigned long sp);

#ifdef CONFIG_PPC_64S_HASH_MMU
void slb_set_size(u16 size);
#else
static inline void slb_set_size(u16 size) { }
#endif

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a "proto-VSID", CONTEXT_BITS + ESID_BITS wide, from
 * the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to MAX_USER_CONTEXT.
 * For kernel space, see get_kernel_context() below.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is co-prime to
 * VSID_MODULUS, making this a 1:1 scrambling function. Because the
 * modulus is 2^n-1 we can compute it efficiently without a divide or
 * extra multiply (see vsid_scramble() below).
 *
 * We use VSID 0 to indicate an invalid VSID. This means we can't use
 * context id 0, because a context id of 0 and an EA of 0 gives a
 * proto-VSID of 0, which would produce a VSID of 0.
 *
 * We also need to avoid the last segment of the last context, because
 * its proto-VSID would likewise scramble to VSID 0 through the modulo
 * operation.
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

/*
 * Some configs support MAX_PHYSMEM greater than 512TB, in which case we
 * need more than one context for the kernel linear mapping. For vmalloc,
 * I/O and vmemmap we use just one context of 512TB each: with a 64 byte
 * struct page, 32TB of vmemmap covers 2PB (51 bits of physical address).
 */
#if (H_MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
#define MAX_KERNEL_CTX_CNT	(1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
#else
#define MAX_KERNEL_CTX_CNT	1
#endif

#define MAX_VMALLOC_CTX_CNT	1
#define MAX_IO_CTX_CNT		1
#define MAX_VMEMMAP_CTX_CNT	1

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mappings. VSID 0 is reserved as invalid.
 * Each segment contains 2^28 bytes, and each context maps 2^49 bytes
 * (512TB).
 *
 * We also need to avoid the last segment of the last context, because
 * that would give a proto-VSID of all ones, which scrambles to VSID 0
 * through the modulo operation.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)

/* The + 2 accounts for the invalid context 0 and avoids overlap with
 * the kernel contexts. */
#define MIN_USER_CONTEXT	(MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
				 MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)

/*
 * On platforms that support only a 65-bit VA, limit the context bits.
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)

/*
 * The multiplier must be chosen so that protovsid * vsid_multiplier
 * doesn't overflow 64 bits, and it must be co-prime to the vsid
 * modulus. The number of bits in the multiplied result must also be
 * less than twice the number of protovsid bits for the modulus
 * optimisation in vsid_scramble() to work.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)
/*
 * Modular multiplicative inverse of VSID_MULTIPLIER modulo the VSID
 * modulus (2^VSID_BITS - 1).
 */
#define VSID_MULINV_256M	ASM_CONST(665548017062)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
#define VSID_MULINV_1T		ASM_CONST(209034062)

/* 1TB VSID reserved for VRMA */
#define VRMA_VSID	0x1ffffffUL
#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define LOW_SLICE_ARRAY_SZ	(BITS_PER_LONG / BITS_PER_BYTE)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->hash_context->slb_addr_limit >> 41)
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS	(PAGE_SHIFT - 2)
#define SBP_L2_BITS	(PAGE_SHIFT - 3)
#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
#endif /* CONFIG_PPC_SUBPAGE_PROT */

/*
 * One bit per slice. The low slices cover 256MB segments up to the 4GB
 * boundary, which gives us 16 low slices. Above that we track slices in
 * 1TB units.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

struct hash_mm_context {
	u16 user_psize; /* page size index */

	/* SLB page size encodings */
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
#ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
#endif
	struct slice_mask mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table *spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
};
#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called with.
 * However gcc is not clever enough to compute the modulus (2^n-1)
 * without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding the mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else
static inline unsigned long vsid_scramble(unsigned long protovsid,
					  unsigned long vsid_multiplier,
					  int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We have the same multiplier for both 256M and 1T segments now
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}

#endif
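
/*
 * Worked note (illustrative, not from the original source): because the
 * modulus is M = 2^n - 1, x mod M can be computed without a divide by
 * folding: x = (x >> n) + (x & M) leaves the value congruent to x mod M,
 * since 2^n == 1 (mod M). One further conditional fold, done branchlessly
 * as (x + ((x + 1) >> n)) & M above, also maps the residue M itself to 0.
 */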

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}
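
/*
 * Illustrative sketch (not part of the original header): a fault path
 * that already knows the context id would typically chain these
 * helpers as
 *
 *	ssize = user_segment_size(ea);
 *	vsid  = get_vsid(context, ea, ssize);
 *	vpn   = hpt_vpn(ea, vsid, ssize);
 *	hash  = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
 *
 * to locate the PTE group for ea.
 */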

/*
 * For kernel space, context ids are allocated in ranges of 512TB
 * (MAX_EA_BITS_PER_CONTEXT) each: the linear mapping takes contexts
 * 1 .. MAX_KERNEL_CTX_CNT, followed by one context each for the
 * vmalloc, I/O and vmemmap regions.
 */
static inline unsigned long get_kernel_context(unsigned long ea)
{
	unsigned long region_id = get_region_id(ea);
	unsigned long ctx;
	/*
	 * Depending on kernel config, the kernel linear map region can
	 * have one or more contexts.
	 */
	if (region_id == LINEAR_MAP_REGION_ID) {
		/*
		 * We already verified ea to be not beyond the addr limit.
		 */
		ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	} else
		ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
	return ctx;
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	context = get_kernel_context(ea);
	return get_vsid(context, ea, ssize);
}

unsigned htab_shift_for_mem_size(unsigned long mem_size);

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					   unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
	       ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
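
/*
 * Illustrative sketch (not part of the original header): SLB management
 * code typically feeds these encodings straight into slbmte, along the
 * lines of
 *
 *	asm volatile("slbmte %0,%1" :
 *		     : "r" (mk_vsid_data(ea, ssize, flags)),
 *		       "r" (mk_esid_data(ea, ssize, index))
 *		     : "memory");
 *
 * with the VSID data in RS and the ESID data (valid bit plus entry
 * index) in RB.
 */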

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */