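/* TLB and cache flush routines for 64-bit SPARC: the Spitfire (sun4u)
 * implementations, the Cheetah and sun4v hypervisor variants that are
 * patched in at boot, and the SMP cross-call versions of each.
 */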
#include <linux/pgtable.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>
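	/* The __flush_tlb_* and __flush_*cache_page entry points below are
	 * the Spitfire implementations.  At boot, cheetah_patch_cachetlbops
	 * or hypervisor_patch_cachetlbops (at the end of this file) copy
	 * the CPU-appropriate variants over them with tlb_patch_one, which
	 * is why every routine is padded with nops to a fixed size.
	 */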
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm:
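	/* Flush all TLB entries for a user context on Spitfire.
	 * %o0 = mm context, %o1 = MMU context register to check
	 * (callers pass SECONDARY_CONTEXT).  19 instructions; the nop
	 * padding keeps the routine large enough for the boot-time
	 * patching done below.
	 */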
	ldxa	[%o1] ASI_DMMU, %g2
	cmp	%g2, %o0
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	mov	0x50, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	sethi	%hi(KERNBASE), %g3
	flush	%g3
	retl
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_page
__flush_tlb_page:
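	/* Flush a single user page translation on Spitfire.
	 * %o0 = context, %o1 = vaddr; the low bit of %o1 is set when the
	 * I-TLB entry must be flushed as well.  22 instructions.
	 */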
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
	andcc	%o1, 1, %g0
	andn	%o1, 1, %o3
	be,pn	%icc, 1f
	or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:
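	/* Flush a batch of user page translations on Spitfire.
	 * %o0 = context, %o1 = number of entries, %o2 = vaddr array;
	 * the low bit of each vaddr requests an I-TLB flush too.
	 * 27 instructions.
	 */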
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range:
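	/* Flush a range of kernel (nucleus context) translations.
	 * %o0 = start, %o1 = end.  Ranges larger than 2^18 bytes take the
	 * slow path below, which walks every TLB entry by hand.
	 * 31 instructions.
	 */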
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sub	%o1, %o0, %o3
	srlx	%o3, 18, %o4
	brnz,pn	%o4, __spitfire_flush_tlb_kernel_range_slow
	sethi	%hi(PAGE_SIZE), %o4
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
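	/* Slow path for ranges too large to demap page by page: walk all
	 * 64 I-TLB and D-TLB entries through the data-access ASIs and
	 * clear every entry that does not have bit 0x40 (the lock bit)
	 * set.
	 */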
__spitfire_flush_tlb_kernel_range_slow:
	mov	63 * 8, %o4
1:	ldxa	[%o4] ASI_ITLB_DATA_ACCESS, %o3
	andcc	%o3, 0x40, %g0
	bne,pn	%xcc, 2f
	mov	TLB_TAG_ACCESS, %o3
	stxa	%g0, [%o3] ASI_IMMU
	stxa	%g0, [%o4] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:	ldxa	[%o4] ASI_DTLB_DATA_ACCESS, %o3
	andcc	%o3, 0x40, %g0
	bne,pn	%xcc, 2f
	mov	TLB_TAG_ACCESS, %o3
	stxa	%g0, [%o3] ASI_DMMU
	stxa	%g0, [%o4] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:	sub	%o4, 8, %o4
	brgez,pt %o4, 1b
	nop
	retl
	nop
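	/* Slow path for __flush_tlb_mm, used when the context register
	 * named by %o1 does not already hold the target context: with
	 * interrupts off, temporarily install %o0, demap, then restore.
	 * %g2 (saved context) and %g3 (demap address) come from above.
	 */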
__spitfire_flush_tlb_mm_slow:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate
	stxa	%o0, [%o1] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU
	sethi	%hi(KERNBASE), %o1
	flush	%o1
	retl
	wrpr	%g1, 0, %pstate

	.section .kprobes.text, "ax"
	.align	32
	.globl	__flush_icache_page
__flush_icache_page:
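	/* Flush one page's worth of I-cache lines.
	 * %o0 = physical page address; it is converted to the kernel
	 * linear-map virtual address and flushed 32 bytes at a time.
	 */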
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_SIZE), %g2
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	add	%o0, %g1, %o0
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	flush	%o0 + %g2
	retl
	nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3
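	/* Spitfire D-cache flush, so the hard-coded 16K cache size and
	 * 32-byte line size below are fine.  %o0 = kernel linear-map
	 * address of the page, %o1 = nonzero if the I-cache must be
	 * flushed as well.
	 */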
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:
	sethi	%hi(PAGE_OFFSET), %g1
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b
	sub	%o2, (1 << 5), %o2		! D-cache line size
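	/* The I-cache does not snoop local stores, so it has to be
	 * flushed separately when the caller requests it (%o1 != 0).
	 * The sllx in the delay slot rebuilds the physical address that
	 * __flush_icache_page expects.
	 */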
	brnz,pt	%o1, __flush_icache_page
	sllx	%o0, 11, %o0
	retl
	nop

#endif

	.previous
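	/* Cheetah specific versions, patched over the Spitfire routines
	 * at boot by cheetah_patch_cachetlbops.  They raise to TL=1 around
	 * the demap and preserve the nucleus page-size fields when they
	 * temporarily rewrite the primary context register.
	 */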
__cheetah_flush_tlb_mm:
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3
	ldxa	[%o2] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx	%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or	%o0, %o1, %o0
	stxa	%o0, [%o2] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU
	sethi	%hi(KERNBASE), %o2
	flush	%o2
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_page:
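	/* %o0 = context, %o1 = vaddr (low bit set: flush the I-TLB too).
	 * 22 instructions.
	 */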
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0
	stxa	%o0, [%o4] ASI_DMMU
	andcc	%o1, 1, %g0
	be,pn	%icc, 1f
	andn	%o1, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:
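	/* %o0 = context, %o1 = number of entries, %o2 = vaddr array.
	 * 27 instructions.
	 */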
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	be,pn	%icc, 2f
	andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_kernel_range:
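	/* %o0 = start, %o1 = end.  Ranges larger than 2^18 bytes use the
	 * "demap all" operation (0x80) at label 3 instead of demapping
	 * page by page.  31 instructions.
	 */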
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sub	%o1, %o0, %o3
	srlx	%o3, 18, %o4
	brnz,pn	%o4, 3f
	sethi	%hi(PAGE_SIZE), %o4
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	nop
3:	mov	0x80, %o4
	stxa	%g0, [%o4] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g0, [%o4] ASI_IMMU_DEMAP
	membar	#Sync
	retl
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:
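	/* %o0 = kernel linear-map address of the page; convert it to a
	 * physical address and invalidate one page of D-cache lines via
	 * ASI_DCACHE_INVALIDATE.  11 instructions.
	 */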
	sethi	%hi(PAGE_OFFSET), %g1
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	sub	%o0, %g1, %o0
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retl
	nop
#endif
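	/* Hypervisor (sun4v) versions, patched in at boot by
	 * hypervisor_patch_cachetlbops.  __hypervisor_tlb_tl0_error is
	 * the common error path: entered with %o0 = HV error status and
	 * %o1 = the operation that failed, it reports the failure through
	 * hypervisor_tlbop_error().
	 */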
__hypervisor_tlb_tl0_error:
	save	%sp, -192, %sp
	mov	%i0, %o0
	call	hypervisor_tlbop_error
	mov	%i1, %o1
	ret
	restore

__hypervisor_flush_tlb_mm:
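	/* %o0 = mm context.  Issues an HV_FAST_MMU_DEMAP_CTX fast trap
	 * with ARG0/ARG1 = 0 (CPU lists unimplemented), ARG2 = context,
	 * ARG3 = HV_MMU_ALL.  19 instructions.
	 */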
	mov	%o0, %o2
	mov	0, %o0
	mov	0, %o1
	mov	HV_MMU_ALL, %o3
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, 1f
	mov	HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o5
	jmpl	%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_page:
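	/* %o0 = context, %o1 = vaddr.  Uses the HV_MMU_UNMAP_ADDR_TRAP
	 * fast trap with HV_MMU_ALL flags.  22 instructions.
	 */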
	mov	%o0, %g2
	mov	%o1, %o0
	mov	%g2, %o1
	mov	HV_MMU_ALL, %o2
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 1f
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_pending:
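	/* %o0 = context, %o1 = number of entries, %o2 = vaddr array.
	 * 27 instructions.
	 */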
	sllx	%o1, 3, %g1
	mov	%o2, %g2
	mov	%o0, %g3
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g2 + %g1], %o0
	mov	%g3, %o1
	mov	HV_MMU_ALL, %o2
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 1f
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g1, 1b
	nop
	retl
	nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_kernel_range:
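	/* %o0 = start, %o1 = end.  Ranges larger than 2^18 bytes fall
	 * back to demapping the whole kernel context via
	 * HV_FAST_MMU_DEMAP_CTX at label 4.  31 instructions.
	 */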
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sub	%o1, %o0, %g2
	srlx	%g2, 18, %g3
	brnz,pn	%g3, 4f
	mov	%o0, %g1
	sethi	%hi(PAGE_SIZE), %g3
	sub	%g2, %g3, %g2
1:	add	%g1, %g2, %o0
	mov	0, %o1
	mov	HV_MMU_ALL, %o2
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 3f
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g2, 1b
	sub	%g2, %g3, %g2
2:	retl
	nop
3:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	nop
4:	mov	0, %o0
	mov	0, %o1
	mov	0, %o2
	mov	HV_MMU_ALL, %o3
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, 3b
	mov	HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
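	/* The sun4v CPUs this runs on have small, non-aliasing D-caches
	 * (e.g. Niagara's 8K D-cache), so no flush work is needed here.
	 */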
__hypervisor_flush_dcache_page:
	retl
	nop
#endif

tlb_patch_one:
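	/* Copy %o2 instruction words from %o1 to %o0, flushing the
	 * I-cache for each word written.  This is how the Spitfire
	 * routines above get overwritten with the Cheetah or hypervisor
	 * versions at boot.
	 */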
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	add	%o0, 4, %o0
	retl
	nop

#ifdef CONFIG_SMP
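	/* Cross-call (xcall) handlers.  These run directly in the
	 * interrupt-vector trap handler, take their arguments in global
	 * registers rather than %o registers, and finish with "retry"
	 * to return from the trap.
	 */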
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:
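	/* %g5 = context.  24 instructions. */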
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5
	stxa	%g5, [%g2] ASI_DMMU
	mov	0x40, %g4
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_flush_tlb_page
xcall_flush_tlb_page:
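	/* %g5 = context, %g1 = vaddr (low bit set: flush the I-TLB too).
	 * 20 instructions.
	 */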
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5
	mov	PRIMARY_CONTEXT, %g4
	stxa	%g5, [%g4] ASI_DMMU
	andcc	%g1, 0x1, %g0
	be,pn	%icc, 2f
	andn	%g1, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%g4] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
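	/* %g1 = start, %g7 = end.  Large ranges fall back to the walk
	 * over all 64 TLB entries at label 2.  44 instructions.
	 */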
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g2
	brnz,pn	%g2, 2f
	sethi	%hi(PAGE_SIZE), %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	sub	%g3, %g2, %g3
	retry
2:	mov	63 * 8, %g1
1:	ldxa	[%g1] ASI_ITLB_DATA_ACCESS, %g2
	andcc	%g2, 0x40, %g0
	bne,pn	%xcc, 2f
	mov	TLB_TAG_ACCESS, %g2
	stxa	%g0, [%g2] ASI_IMMU
	stxa	%g0, [%g1] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:	ldxa	[%g1] ASI_DTLB_DATA_ACCESS, %g2
	andcc	%g2, 0x40, %g0
	bne,pn	%xcc, 2f
	mov	TLB_TAG_ACCESS, %g2
	stxa	%g0, [%g2] ASI_DMMU
	stxa	%g0, [%g1] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:	sub	%g1, 8, %g1
	brgez,pt %g1, 1b
	nop
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_sync_tick
xcall_sync_tick:
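	/* Re-synchronize this CPU's tick via smp_synchronize_tick_client.
	 * The two instructions at 661: are replaced with nops on sun4v
	 * through the .sun4v_2insn_patch table.
	 */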
661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	or	%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	nop
#endif
	call	smp_synchronize_tick_client
	nop
	b	rtrap_xcall
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	.globl	xcall_fetch_glob_regs
xcall_fetch_glob_regs:
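	/* Snapshot this CPU's trap state and key registers into its slot
	 * in global_cpu_snapshot[], for the global register dump.
	 */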
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rdpr	%tstate, %g7
	stx	%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr	%tpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TPC]
	rdpr	%tnpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TNPC]
	stx	%o7, [%g1 + GR_SNAP_O7]
	stx	%i7, [%g1 + GR_SNAP_I7]
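	/* Step back one register window to read the caller's %i7 and
	 * record it as GR_SNAP_RPC.
	 */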
	rdpr	%cwp, %g3
	sub	%g3, 1, %g7
	wrpr	%g7, %cwp
	mov	%i7, %g7
	wrpr	%g3, %cwp
	stx	%g7, [%g1 + GR_SNAP_RPC]
	sethi	%hi(trap_block), %g7
	or	%g7, %lo(trap_block), %g7
	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add	%g7, %g2, %g7
	ldx	[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx	%g3, [%g1 + GR_SNAP_THREAD]
	retry

	.globl	xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
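	/* Snapshot the %pcr and %pic performance registers into the PMU
	 * area of this CPU's global_cpu_snapshot slot.
	 */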
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rd	%pic, %g7
	stx	%g7, [%g1 + (4 * 8)]
	rd	%pcr, %g7
	stx	%g7, [%g1 + (0 * 8)]
	retry

	.globl	xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
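	/* Niagara-4 style PMU snapshot: the four PIC counters are read
	 * through ASI_PIC and the four PCRs through the
	 * HV_FAST_VT_GET_PERFREG hypervisor call.
	 */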
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1

	ldxa	[%g0] ASI_PIC, %g7
	stx	%g7, [%g1 + (4 * 8)]
	mov	0x08, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (5 * 8)]
	mov	0x10, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (6 * 8)]
	mov	0x18, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (7 * 8)]

	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o5, %g7

	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	3, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (3 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	2, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (2 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	1, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (1 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	0, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (0 * 8)]

	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g7, %o5

	retry

__cheetah_xcall_flush_tlb_kernel_range:
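	/* Cheetah version of the cross-call kernel-range flush: large
	 * ranges use the "demap all" operation (0x80) at label 2 instead
	 * of walking TLB entries.  %g1 = start, %g7 = end.  44 insns.
	 */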
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g2
	brnz,pn	%g2, 2f
	sethi	%hi(PAGE_SIZE), %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	sub	%g3, %g2, %g3
	retry
2:	mov	0x80, %g2
	stxa	%g0, [%g2] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g0, [%g2] ASI_IMMU_DEMAP
	membar	#Sync
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah:
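	/* %g1 = physical page address; displacement-flush one page of
	 * D-cache lines through ASI_DCACHE_INVALIDATE.
	 */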
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retry
	nop
#endif

	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire:
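	/* %g1 = physical page address, %g7 = kernel virtual address of
	 * the page, %g5 = nonzero if the I-cache must be flushed as well.
	 */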
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1	! Form tag comparator
	sethi	%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub	%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0
	be,pn	%xcc, 2f
	andn	%g2, 0x3, %g2
	cmp	%g2, %g1

	bne,pt	%xcc, 2f
	nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG
	membar	#Sync
2:	cmp	%g3, 0
	bne,pt	%xcc, 1b
	sub	%g3, (1 << 5), %g3

	brz,pn	%g5, 2f
#endif
	sethi	%hi(PAGE_SIZE), %g3

1:	flush	%g7
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	add	%g7, (1 << 5), %g7

2:	retry
	nop
	nop
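	/* Error path for the hypervisor cross-call flushes below.
	 * Entered with %g5 = HV error status and %g6 = the operation
	 * that failed; switches onto the kernel trap path via etrap and
	 * reports through hypervisor_tlbop_error_xcall().
	 */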
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	mov	%l5, %o1
	ba,a,pt	%xcc, rtrap

	.globl	__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm:
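	/* %g5 = context.  The %o registers live across the cross-call,
	 * so they are stashed in globals around the
	 * HV_FAST_MMU_DEMAP_CTX call.  24 instructions.
	 */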
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%o3, %g1
	mov	%o5, %g7
	clr	%o0
	clr	%o1
	mov	%g5, %o2
	mov	HV_MMU_ALL, %o3
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn	%o0, 1f
	mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	mov	%g1, %o3
	mov	%g7, %o5
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	nop

	.globl	__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page:
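	/* %g5 = context, %g1 = vaddr.  20 instructions. */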
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%g1, %o0
	mov	%g5, %o1
	mov	HV_MMU_ALL, %o2
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn %o0, 1f
	mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	nop

	.globl	__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range:
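	/* %g1 = start, %g7 = end.  Ranges larger than 2^18 bytes fall
	 * back to a full HV_FAST_MMU_DEMAP_CTX at label 2.
	 * 44 instructions.
	 */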
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g7
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	mov	%o0, %g2
	mov	%o1, %g4
	brnz,pn	%g7, 2f
	mov	%o2, %g7
1:	add	%g1, %g3, %o0
	mov	0, %o1
	mov	HV_MMU_ALL, %o2
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn	%o0, 1f
	mov	%o0, %g5
	sethi	%hi(PAGE_SIZE), %o2
	brnz,pt	%g3, 1b
	sub	%g3, %o2, %g3
5:	mov	%g2, %o0
	mov	%g4, %o1
	mov	%g7, %o2
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	nop
2:	mov	%o3, %g1
	mov	%o5, %g3
	mov	0, %o0
	mov	0, %o1
	mov	0, %o2
	mov	HV_MMU_ALL, %o3
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	%g1, %o3
	brz,pt	%o0, 5b
	mov	%g3, %o5
	mov	HV_FAST_MMU_DEMAP_CTX, %g6
	ba,pt	%xcc, 1b
	clr	%g5
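	/* The remaining cross-calls just post a software interrupt at
	 * the appropriate PIL; the real work happens in that PIL's
	 * handler.
	 */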
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_call_function_single
xcall_call_function_single:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl	xcall_kgdb_capture
xcall_kgdb_capture:
	wr	%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif

	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
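	/* Patch the Spitfire routines above (and, with SMP, the kernel
	 * range cross-call) with the Cheetah versions.  Each count
	 * passed to tlb_patch_one must match the instruction count of
	 * the routine being copied.
	 */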
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	19, %o2

	sethi	%hi(__flush_tlb_page), %o0
	or	%o0, %lo(__flush_tlb_page), %o0
	sethi	%hi(__cheetah_flush_tlb_page), %o1
	or	%o1, %lo(__cheetah_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	22, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	27, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__cheetah_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__cheetah_flush_dcache_page), %o1
	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
	call	tlb_patch_one
	mov	11, %o2
#endif

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	44, %o2
#endif

	ret
	restore

	.globl	hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
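	/* Same idea for sun4v: patch in the hypervisor versions,
	 * including the SMP cross-call variants.
	 */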
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	19, %o2

	sethi	%hi(__flush_tlb_page), %o0
	or	%o0, %lo(__flush_tlb_page), %o0
	sethi	%hi(__hypervisor_flush_tlb_page), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	22, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	27, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__hypervisor_flush_dcache_page), %o1
	or	%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call	tlb_patch_one
	mov	2, %o2
#endif

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_mm), %o0
	or	%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	24, %o2

	sethi	%hi(xcall_flush_tlb_page), %o0
	or	%o0, %lo(xcall_flush_tlb_page), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	20, %o2

	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	44, %o2
#endif

	ret
	restore