# Source exported from an LXR cross-reference page: arch/s390/kernel/entry.S
# (Linux kernel, S390 low-level entry points). Web-navigation residue removed;
# the original LXR per-line numbers ("0001"...) remain embedded in each line.
0001 /*
0002  *    S390 low-level entry points.
0003  *
0004  *    Copyright IBM Corp. 1999, 2012
0005  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
0006  *       Hartmut Penner (hp@de.ibm.com),
0007  *       Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
0008  *       Heiko Carstens <heiko.carstens@de.ibm.com>
0009  */
0010 
0011 #include <linux/init.h>
0012 #include <linux/linkage.h>
0013 #include <asm/processor.h>
0014 #include <asm/cache.h>
0015 #include <asm/errno.h>
0016 #include <asm/ptrace.h>
0017 #include <asm/thread_info.h>
0018 #include <asm/asm-offsets.h>
0019 #include <asm/unistd.h>
0020 #include <asm/page.h>
0021 #include <asm/sigp.h>
0022 #include <asm/irq.h>
0023 #include <asm/vx-insn.h>
0024 #include <asm/setup.h>
0025 #include <asm/nmi.h>
0026 #include <asm/export.h>
0027 
# Byte offsets of the individual saved GPR slots (r0-r15) inside the
# gprs array of struct pt_regs; each register slot is 8 bytes wide.
0028 __PT_R0      =  __PT_GPRS
0029 __PT_R1      =  __PT_GPRS + 8
0030 __PT_R2      =  __PT_GPRS + 16
0031 __PT_R3      =  __PT_GPRS + 24
0032 __PT_R4      =  __PT_GPRS + 32
0033 __PT_R5      =  __PT_GPRS + 40
0034 __PT_R6      =  __PT_GPRS + 48
0035 __PT_R7      =  __PT_GPRS + 56
0036 __PT_R8      =  __PT_GPRS + 64
0037 __PT_R9      =  __PT_GPRS + 72
0038 __PT_R10     =  __PT_GPRS + 80
0039 __PT_R11     =  __PT_GPRS + 88
0040 __PT_R12     =  __PT_GPRS + 96
0041 __PT_R13     =  __PT_GPRS + 104
0042 __PT_R14     =  __PT_GPRS + 112
0043 __PT_R15     =  __PT_GPRS + 120
0044 
# Kernel stack geometry. STACK_INIT is the initial stack pointer offset:
# the top of the stack minus room for one register save area and a pt_regs.
0045 STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
0046 STACK_SIZE  = 1 << STACK_SHIFT
0047 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
0048 
# Aggregate flag masks tested on the return-to-user paths (see .Lsysc_tif
# and .Lio_tif below): thread-info, CPU and pt_regs work bits.
0049 _TIF_WORK   = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
0050            _TIF_UPROBE)
0051 _TIF_TRACE  = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
0052            _TIF_SYSCALL_TRACEPOINT)
0053 _CIF_WORK   = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
0054 _PIF_WORK   = (_PIF_PER_TRAP)
0055 
# BASED(name): address a local literal relative to %r13; the interrupt
# entry paths load %r13 with the address of cleanup_critical (larl %r13,...).
0056 #define BASED(name) name-cleanup_critical(%r13)
0057 
# Lockdep irq-state tracing: basr %r2,%r0 places the address of the next
# instruction into %r2, which trace_hardirqs_on_caller takes as its argument.
0058     .macro  TRACE_IRQS_ON
0059 #ifdef CONFIG_TRACE_IRQFLAGS
0060     basr    %r2,%r0
0061     brasl   %r14,trace_hardirqs_on_caller
0062 #endif
0063     .endm
0064 
0065     .macro  TRACE_IRQS_OFF
0066 #ifdef CONFIG_TRACE_IRQFLAGS
0067     basr    %r2,%r0
0068     brasl   %r14,trace_hardirqs_off_caller
0069 #endif
0070     .endm
0071 
# Call lockdep_sys_exit, but only when the saved PSW says we return to
# user space (problem-state bit); jz .+10 skips over the 6-byte brasl.
0072     .macro  LOCKDEP_SYS_EXIT
0073 #ifdef CONFIG_LOCKDEP
0074     tm  __PT_PSW+1(%r11),0x01   # returning to user ?
0075     jz  .+10
0076     brasl   %r14,lockdep_sys_exit
0077 #endif
0078     .endm
0079 
# Stack overflow check: branch to stack_overflow (with the save area
# address in %r14) when %r15 has run into the CONFIG_STACK_GUARD region.
0080     .macro  CHECK_STACK stacksize,savearea
0081 #ifdef CONFIG_CHECK_STACK
0082     tml %r15,\stacksize - CONFIG_STACK_GUARD
0083     lghi    %r14,\savearea
0084     jz  stack_overflow
0085 #endif
0086     .endm
0087 
# SWITCH_ASYNC: select the stack for an asynchronous interrupt.
# If the interrupt hit user code, update vtime and switch to the async
# stack. If it hit kernel code inside the critical window (tested via the
# .Lcritical_start/.Lcritical_length literals), run cleanup_critical first.
# On exit: %r15 = stack to use, %r11 -> pt_regs area on that stack.
0088     .macro  SWITCH_ASYNC savearea,timer
0089     tmhh    %r8,0x0001      # interrupting from user ?
0090     jnz 1f
0091     lgr %r14,%r9
0092     slg %r14,BASED(.Lcritical_start)
0093     clg %r14,BASED(.Lcritical_length)
0094     jhe 0f
0095     lghi    %r11,\savearea      # inside critical section, do cleanup
0096     brasl   %r14,cleanup_critical
0097     tmhh    %r8,0x0001      # retest problem state after cleanup
0098     jnz 1f
0099 0:  lg  %r14,__LC_ASYNC_STACK   # are we already on the async stack?
0100     slgr    %r14,%r15
0101     srag    %r14,%r14,STACK_SHIFT
0102     jnz 2f
0103     CHECK_STACK 1<<STACK_SHIFT,\savearea
0104     aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
0105     j   3f
0106 1:  LAST_BREAK %r14
0107     UPDATE_VTIME %r14,%r15,\timer
0108 2:  lg  %r15,__LC_ASYNC_STACK   # load async stack
0109 3:  la  %r11,STACK_FRAME_OVERHEAD(%r15)
0110     .endm
0111 
# UPDATE_VTIME: fold the CPU-timer snapshots into the lowcore accounting
# fields (__LC_USER_TIMER / __LC_SYSTEM_TIMER) and record \enter_timer as
# the new __LC_LAST_UPDATE_TIMER. \w1 and \w2 are scratch registers.
0112     .macro UPDATE_VTIME w1,w2,enter_timer
0113     lg  \w1,__LC_EXIT_TIMER
0114     lg  \w2,__LC_LAST_UPDATE_TIMER
0115     slg \w1,\enter_timer
0116     slg \w2,__LC_EXIT_TIMER
0117     alg \w1,__LC_USER_TIMER
0118     alg \w2,__LC_SYSTEM_TIMER
0119     stg \w1,__LC_USER_TIMER
0120     stg \w2,__LC_SYSTEM_TIMER
0121     mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
0122     .endm
0123 
# LAST_BREAK: store the last-breaking-event address (%r10, loaded from
# __LC_LAST_BREAK by the entry paths) into current->thread.last_break.
# Skipped (jz over the store) when %r10 >> 23 is zero.
0124     .macro  LAST_BREAK scratch
0125     srag    \scratch,%r10,23
0126 #ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
0127     jz  .+10
0128     stg %r10,__TASK_thread+__THREAD_last_break(%r12)
0129 #else
0130     jz  .+14
0131     lghi    \scratch,__TASK_thread
0132     stg %r10,__THREAD_last_break(\scratch,%r12)
0133 #endif
0134     .endm
0135 
# REENABLE_IRQS: load the interrupted context's PSW mask (saved in %r8)
# via ssm, with 0xbf clearing bit 1 of the mask byte (PER) first.
0136     .macro REENABLE_IRQS
0137     stg %r8,__LC_RETURN_PSW
0138     ni  __LC_RETURN_PSW,0xbf
0139     ssm __LC_RETURN_PSW
0140     .endm
0141 
# STCK: store the TOD clock into \savearea, using the faster STCKF
# opcode when the machine level guarantees it.
0142     .macro STCK savearea
0143 #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
0144     .insn   s,0xb27c0000,\savearea      # store clock fast
0145 #else
0146     .insn   s,0xb2050000,\savearea      # store clock
0147 #endif
0148     .endm
0149 
0150     /*
0151      * The TSTMSK macro generates a test-under-mask instruction by
0152      * calculating the memory offset for the specified mask value.
0153      * Mask value can be any constant.  The macro shifts the mask
0154      * value to calculate the memory offset for the test-under-mask
0155      * instruction.
0156      */
0157     .macro TSTMSK addr, mask, size=8, bytepos=0
         # Recurse towards the most-significant non-zero byte of \mask;
         # a mask spanning more than one byte is rejected at build time.
0158         .if (\bytepos < \size) && (\mask >> 8)
0159             .if (\mask & 0xff)
0160                 .error "Mask exceeds byte boundary"
0161             .endif
0162             TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
0163             .exitm
0164         .endif
0165         .ifeq \mask
0166             .error "Mask must not be zero"
0167         .endif
         # big-endian: byte N counted from the most significant end
0168         off = \size - \bytepos - 1
0169         tm  off+\addr, \mask
0170     .endm
0171 
0172     .section .kprobes.text, "ax"
0173 .Ldummy:
0174     /*
0175      * This nop exists only in order to avoid that __switch_to starts at
0176      * the beginning of the kprobes text section. In that case we would
0177      * have several symbols at the same address. E.g. objdump would take
0178      * an arbitrary symbol name when disassembling this code.
0179      * With the added nop in between the __switch_to symbol is unique
0180      * again.
0181      */
0182     nop 0
0183 
0184 /*
0185  * Scheduler resume function, called by switch_to
0186  *  gpr2 = (task_struct *) prev
0187  *  gpr3 = (task_struct *) next
0188  * Returns:
0189  *  gpr2 = prev
0190  */
0191 ENTRY(__switch_to)
0192     stmg    %r6,%r15,__SF_GPRS(%r15)    # store gprs of prev task
0193     lgr %r1,%r2
0194     aghi    %r1,__TASK_thread       # thread_struct of prev task
0195     lg  %r5,__TASK_stack(%r3)       # start of kernel stack of next
0196     stg %r15,__THREAD_ksp(%r1)      # store kernel stack of prev
0197     lgr %r1,%r3
0198     aghi    %r1,__TASK_thread       # thread_struct of next task
0199     lgr %r15,%r5
0200     aghi    %r15,STACK_INIT         # end of kernel stack of next
0201     stg %r3,__LC_CURRENT        # store task struct of next
0202     stg %r15,__LC_KERNEL_STACK      # store end of kernel stack
0203     lg  %r15,__THREAD_ksp(%r1)      # load kernel stack of next
0204     /* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
0205     lctl    %c4,%c4,__TASK_pid(%r3)     # load pid to control reg. 4
0206     mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
0207     lmg %r6,%r15,__SF_GPRS(%r15)    # load gprs of next task
0208     TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
0209     bzr %r14                # no LPP facility -> return
0210     .insn   s,0xb2800000,__LC_LPP       # set program parameter
0211     br  %r14
0212 
# Start of the "critical section" window; cleanup_critical fixes up
# interrupts that land between here and .L__critical_end.
# NOTE(review): the .Lcritical_start/.Lcritical_length literals that
# reference these labels are defined outside this chunk — confirm there.
0213 .L__critical_start:
0214 
0215 #if IS_ENABLED(CONFIG_KVM)
0216 /*
0217  * sie64a calling convention:
0218  * %r2 pointer to sie control block
0219  * %r3 guest register save area
0220  */
0221 ENTRY(sie64a)
0222     stmg    %r6,%r14,__SF_GPRS(%r15)    # save kernel registers
0223     stg %r2,__SF_EMPTY(%r15)        # save control block pointer
0224     stg %r3,__SF_EMPTY+8(%r15)      # save guest register save area
0225     xc  __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
0226     TSTMSK  __LC_CPU_FLAGS,_CIF_FPU     # load guest fp/vx registers ?
0227     jno .Lsie_load_guest_gprs
0228     brasl   %r14,load_fpu_regs      # load guest fp/vx regs
0229 .Lsie_load_guest_gprs:
0230     lmg %r0,%r13,0(%r3)         # load guest gprs 0-13
0231     lg  %r14,__LC_GMAP          # get gmap pointer
0232     ltgr    %r14,%r14
0233     jz  .Lsie_gmap
0234     lctlg   %c1,%c1,__GMAP_ASCE(%r14)   # load primary asce
0235 .Lsie_gmap:
0236     lg  %r14,__SF_EMPTY(%r15)       # get control block pointer
0237     oi  __SIE_PROG0C+3(%r14),1      # we are going into SIE now
0238     tm  __SIE_PROG20+3(%r14),3      # last exit...
0239     jnz .Lsie_skip
0240     TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
0241     jo  .Lsie_skip          # exit if fp/vx regs changed
0242     sie 0(%r14)             # enter guest context
0243 .Lsie_skip:
0244     ni  __SIE_PROG0C+3(%r14),0xfe   # no longer in SIE
0245     lctlg   %c1,%c1,__LC_USER_ASCE      # load primary asce
0246 .Lsie_done:
0247 # some program checks are suppressing. C code (e.g. do_protection_exception)
0248 # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
0249 # instructions between sie64a and .Lsie_done should not cause program
0250 # interrupts. So lets use a nop (47 00 00 00) as a landing pad.
0251 # See also .Lcleanup_sie
0252 .Lrewind_pad:
0253     nop 0
0254     .globl sie_exit
0255 sie_exit:
0256     lg  %r14,__SF_EMPTY+8(%r15)     # load guest register save area
0257     stmg    %r0,%r13,0(%r14)        # save guest gprs 0-13
0258     lmg %r6,%r14,__SF_GPRS(%r15)    # restore kernel registers
0259     lg  %r2,__SF_EMPTY+16(%r15)     # return exit reason code
0260     br  %r14
0261 .Lsie_fault:
0262     lghi    %r14,-EFAULT
0263     stg %r14,__SF_EMPTY+16(%r15)    # set exit reason code
0264     j   sie_exit
0265 
# Faults on the landing pad or at sie_exit are redirected to .Lsie_fault,
# which reports -EFAULT as the exit reason.
0266     EX_TABLE(.Lrewind_pad,.Lsie_fault)
0267     EX_TABLE(sie_exit,.Lsie_fault)
0268 EXPORT_SYMBOL(sie64a)
0269 EXPORT_SYMBOL(sie_exit)
0270 #endif
0271 
0272 /*
0273  * SVC interrupt handler routine. System calls are synchronous events and
0274  * are executed with interrupts enabled.
0275  */
0276 
# Register roles on this path: %r8 = syscall number << 2, %r9 = handler
# address, %r10 = last break / syscall table, %r11 -> pt_regs,
# %r12 = current task, %r14 = pt_regs flags, %r15 = kernel stack.
0277 ENTRY(system_call)
0278     stpt    __LC_SYNC_ENTER_TIMER
0279 .Lsysc_stmg:
0280     stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
0281     lg  %r10,__LC_LAST_BREAK
0282     lg  %r12,__LC_CURRENT
0283     lghi    %r14,_PIF_SYSCALL
0284 .Lsysc_per:
0285     lg  %r15,__LC_KERNEL_STACK
0286     la  %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
0287     LAST_BREAK %r13
0288 .Lsysc_vtime:
0289     UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
0290     stmg    %r0,%r7,__PT_R0(%r11)
0291     mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
0292     mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
0293     mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
0294     stg %r14,__PT_FLAGS(%r11)
0295 .Lsysc_do_svc:
0296     # load address of system call table
0297 #ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
0298     lg  %r10,__TASK_thread+__THREAD_sysc_table(%r12)
0299 #else
0300     lghi    %r13,__TASK_thread
0301     lg  %r10,__THREAD_sysc_table(%r13,%r12)
0302 #endif
0303     llgh    %r8,__PT_INT_CODE+2(%r11)
0304     slag    %r8,%r8,2           # shift and test for svc 0
0305     jnz .Lsysc_nr_ok
0306     # svc 0: system call number in %r1
0307     llgfr   %r1,%r1             # clear high word in r1
0308     cghi    %r1,NR_syscalls
0309     jnl .Lsysc_nr_ok            # out of range -> table slot 0
0310     sth %r1,__PT_INT_CODE+2(%r11)
0311     slag    %r8,%r1,2
0312 .Lsysc_nr_ok:
0313     xc  __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
0314     stg %r2,__PT_ORIG_GPR2(%r11)
0315     stg %r7,STACK_FRAME_OVERHEAD(%r15)
0316     lgf %r9,0(%r8,%r10)         # get system call add.
0317     TSTMSK  __TI_flags(%r12),_TIF_TRACE
0318     jnz .Lsysc_tracesys
0319     basr    %r14,%r9            # call sys_xxxx
0320     stg %r2,__PT_R2(%r11)       # store return value
0321 
# Common syscall exit: check all work bits before restoring the user
# context with lpswe. .Lsysc_return is also used as the return address
# for the work handlers below.
0322 .Lsysc_return:
0323     LOCKDEP_SYS_EXIT
0324 .Lsysc_tif:
0325     TSTMSK  __PT_FLAGS(%r11),_PIF_WORK
0326     jnz .Lsysc_work
0327     TSTMSK  __TI_flags(%r12),_TIF_WORK
0328     jnz .Lsysc_work         # check for work
0329     TSTMSK  __LC_CPU_FLAGS,_CIF_WORK
0330     jnz .Lsysc_work
0331 .Lsysc_restore:
0332     lg  %r14,__LC_VDSO_PER_CPU
0333     lmg %r0,%r10,__PT_R0(%r11)
0334     mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
0335     stpt    __LC_EXIT_TIMER
0336     mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0337     lmg %r11,%r15,__PT_R11(%r11)
0338     lpswe   __LC_RETURN_PSW
0339 .Lsysc_done:
0340 
0341 #
0342 # One of the work bits is on. Find out which one.
0343 #
# Priority order: machine check > reschedule > uprobe > PER trap >
# signals > notify-resume > FPU restore > ASCE reload.
0344 .Lsysc_work:
0345     TSTMSK  __LC_CPU_FLAGS,_CIF_MCCK_PENDING
0346     jo  .Lsysc_mcck_pending
0347     TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
0348     jo  .Lsysc_reschedule
0349 #ifdef CONFIG_UPROBES
0350     TSTMSK  __TI_flags(%r12),_TIF_UPROBE
0351     jo  .Lsysc_uprobe_notify
0352 #endif
0353     TSTMSK  __PT_FLAGS(%r11),_PIF_PER_TRAP
0354     jo  .Lsysc_singlestep
0355     TSTMSK  __TI_flags(%r12),_TIF_SIGPENDING
0356     jo  .Lsysc_sigpending
0357     TSTMSK  __TI_flags(%r12),_TIF_NOTIFY_RESUME
0358     jo  .Lsysc_notify_resume
0359     TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
0360     jo  .Lsysc_vxrs
0361     TSTMSK  __LC_CPU_FLAGS,_CIF_ASCE
0362     jo  .Lsysc_uaccess
0363     j   .Lsysc_return       # beware of critical section cleanup
0364 
0365 #
0366 # _TIF_NEED_RESCHED is set, call schedule
0367 #
0368 .Lsysc_reschedule:
0369     larl    %r14,.Lsysc_return  # tail-call: schedule returns to sysc_return
0370     jg  schedule
0371 
0372 #
0373 # _CIF_MCCK_PENDING is set, call handler
0374 #
0375 .Lsysc_mcck_pending:
0376     larl    %r14,.Lsysc_return
0377     jg  s390_handle_mcck    # TIF bit will be cleared by handler
0378 
0379 #
0380 # _CIF_ASCE is set, load user space asce
0381 #
0382 .Lsysc_uaccess:
0383     ni  __LC_CPU_FLAGS+7,255-_CIF_ASCE
0384     lctlg   %c1,%c1,__LC_USER_ASCE      # load primary asce
0385     j   .Lsysc_return
0386 
0387 #
0388 # CIF_FPU is set, restore floating-point controls and floating-point registers.
0389 #
0390 .Lsysc_vxrs:
0391     larl    %r14,.Lsysc_return
0392     jg  load_fpu_regs
0393 
0394 #
0395 # _TIF_SIGPENDING is set, call do_signal
0396 #
0397 .Lsysc_sigpending:
0398     lgr %r2,%r11        # pass pointer to pt_regs
0399     brasl   %r14,do_signal
0400     TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL
0401     jno .Lsysc_return
# System call restart requested by do_signal: reload the svc arguments
# from pt_regs and re-enter the dispatch at .Lsysc_nr_ok.
0402     lmg %r2,%r7,__PT_R2(%r11)   # load svc arguments
0403     lghi    %r8,0           # svc 0 returns -ENOSYS
0404     llgh    %r1,__PT_INT_CODE+2(%r11)   # load new svc number
0405     cghi    %r1,NR_syscalls
0406     jnl .Lsysc_nr_ok        # invalid svc number -> do svc 0
0407     slag    %r8,%r1,2
0408     j   .Lsysc_nr_ok        # restart svc
0409 
0410 #
0411 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
0412 #
0413 .Lsysc_notify_resume:
0414     lgr %r2,%r11        # pass pointer to pt_regs
0415     larl    %r14,.Lsysc_return
0416     jg  do_notify_resume
0417 
0418 #
0419 # _TIF_UPROBE is set, call uprobe_notify_resume
0420 #
0421 #ifdef CONFIG_UPROBES
0422 .Lsysc_uprobe_notify:
0423     lgr %r2,%r11        # pass pointer to pt_regs
0424     larl    %r14,.Lsysc_return
0425     jg  uprobe_notify_resume
0426 #endif
0427 
0428 #
0429 # _PIF_PER_TRAP is set, call do_per_trap
0430 #
0431 .Lsysc_singlestep:
0432     ni  __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
0433     lgr %r2,%r11        # pass pointer to pt_regs
0434     larl    %r14,.Lsysc_return
0435     jg  do_per_trap
0436 
0437 #
0438 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
0439 # and after the system call
0440 #
0441 .Lsysc_tracesys:
0442     lgr %r2,%r11        # pass pointer to pt_regs
0443     la  %r3,0
0444     llgh    %r0,__PT_INT_CODE+2(%r11)
0445     stg %r0,__PT_R2(%r11)
0446     brasl   %r14,do_syscall_trace_enter
0447     lghi    %r0,NR_syscalls
0448     clgr    %r0,%r2     # tracer may have changed the svc number
0449     jnh .Lsysc_tracenogo
0450     sllg    %r8,%r2,2
0451     lgf %r9,0(%r8,%r10)
0452 .Lsysc_tracego:
0453     lmg %r3,%r7,__PT_R3(%r11)
0454     stg %r7,STACK_FRAME_OVERHEAD(%r15)
0455     lg  %r2,__PT_ORIG_GPR2(%r11)
0456     basr    %r14,%r9        # call sys_xxx
0457     stg %r2,__PT_R2(%r11)   # store return value
0458 .Lsysc_tracenogo:
0459     TSTMSK  __TI_flags(%r12),_TIF_TRACE
0460     jz  .Lsysc_return
0461     lgr %r2,%r11        # pass pointer to pt_regs
0462     larl    %r14,.Lsysc_return
0463     jg  do_syscall_trace_exit
0464 
0465 #
0466 # a new process exits the kernel with ret_from_fork
0467 #
0468 ENTRY(ret_from_fork)
0469     la  %r11,STACK_FRAME_OVERHEAD(%r15)
0470     lg  %r12,__LC_CURRENT
0471     brasl   %r14,schedule_tail
0472     TRACE_IRQS_ON
0473     ssm __LC_SVC_NEW_PSW    # reenable interrupts
0474     tm  __PT_PSW+1(%r11),0x01   # forking a kernel thread ?
0475     jne .Lsysc_tracenogo        # user space -> normal syscall exit
0476     # it's a kernel thread
0477     lmg %r9,%r10,__PT_R9(%r11)  # load gprs
# Kernel threads enter here with %r9 = function, %r10 = argument; when
# the function returns, fall through to the syscall exit path.
0478 ENTRY(kernel_thread_starter)
0479     la  %r2,0(%r10)
0480     basr    %r14,%r9
0481     j   .Lsysc_tracenogo
0482 
0483 /*
0484  * Program check handler routine
0485  */
0486 
0487 ENTRY(pgm_check_handler)
0488     stpt    __LC_SYNC_ENTER_TIMER
0489     stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
0490     lg  %r10,__LC_LAST_BREAK
0491     lg  %r12,__LC_CURRENT
0492     larl    %r13,cleanup_critical   # %r13 base for BASED() literals
0493     lmg %r8,%r9,__LC_PGM_OLD_PSW
0494     tmhh    %r8,0x0001      # test problem state bit
0495     jnz 2f          # -> fault in user space
0496 #if IS_ENABLED(CONFIG_KVM)
0497     # cleanup critical section for sie64a
0498     lgr %r14,%r9
0499     slg %r14,BASED(.Lsie_critical_start)
0500     clg %r14,BASED(.Lsie_critical_length)
0501     jhe 0f
0502     brasl   %r14,.Lcleanup_sie
0503 #endif
0504 0:  tmhh    %r8,0x4000      # PER bit set in old PSW ?
0505     jnz 1f          # -> enabled, can't be a double fault
0506     tm  __LC_PGM_ILC+3,0x80 # check for per exception
0507     jnz .Lpgm_svcper        # -> single stepped svc
0508 1:  CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
0509     aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
0510     j   3f
# Fault in user space: account vtime and switch to the kernel stack;
# also copy the transaction-diagnostic block on transaction abort.
0511 2:  LAST_BREAK %r14
0512     UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
0513     lg  %r15,__LC_KERNEL_STACK
0514     lgr %r14,%r12
0515     aghi    %r14,__TASK_thread  # pointer to thread_struct
0516     lghi    %r13,__LC_PGM_TDB
0517     tm  __LC_PGM_ILC+2,0x02 # check for transaction abort
0518     jz  3f
0519     mvc __THREAD_trap_tdb(256,%r14),0(%r13)
0520 3:  la  %r11,STACK_FRAME_OVERHEAD(%r15)
0521     stmg    %r0,%r7,__PT_R0(%r11)
0522     mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
0523     stmg    %r8,%r9,__PT_PSW(%r11)
0524     mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
0525     mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
0526     xc  __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
0527     stg %r10,__PT_ARGS(%r11)
0528     tm  __LC_PGM_ILC+3,0x80 # check for per exception
0529     jz  4f
0530     tmhh    %r8,0x0001      # kernel per event ?
0531     jz  .Lpgm_kprobe
0532     oi  __PT_FLAGS+7(%r11),_PIF_PER_TRAP
0533     mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
0534     mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
0535     mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
# Dispatch through pgm_check_table, indexed by the low 7 bits of the
# interruption code.
0536 4:  REENABLE_IRQS
0537     xc  __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
0538     larl    %r1,pgm_check_table
0539     llgh    %r10,__PT_INT_CODE+2(%r11)
0540     nill    %r10,0x007f
0541     sll %r10,2
0542     je  .Lpgm_return
0543     lgf %r1,0(%r10,%r1)     # load address of handler routine
0544     lgr %r2,%r11        # pass pointer to pt_regs
0545     basr    %r14,%r1        # branch to interrupt-handler
0546 .Lpgm_return:
0547     LOCKDEP_SYS_EXIT
0548     tm  __PT_PSW+1(%r11),0x01   # returning to user ?
0549     jno .Lsysc_restore
0550     j   .Lsysc_tif
0551 
0552 #
0553 # PER event in supervisor state, must be kprobes
0554 #
0555 .Lpgm_kprobe:
0556     REENABLE_IRQS
0557     xc  __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
0558     lgr %r2,%r11        # pass pointer to pt_regs
0559     brasl   %r14,do_per_trap
0560     j   .Lpgm_return
0561 
0562 #
0563 # single stepped system call
0564 #
0565 .Lpgm_svcper:
0566     mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
0567     larl    %r14,.Lsysc_per
0568     stg %r14,__LC_RETURN_PSW+8
0569     lghi    %r14,_PIF_SYSCALL | _PIF_PER_TRAP
0570     lpswe   __LC_RETURN_PSW     # branch to .Lsysc_per and enable irqs
0571 
0572 /*
0573  * IO interrupt handler routine
0574  */
0575 ENTRY(io_int_handler)
0576     STCK    __LC_INT_CLOCK
0577     stpt    __LC_ASYNC_ENTER_TIMER
0578     stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
0579     lg  %r10,__LC_LAST_BREAK
0580     lg  %r12,__LC_CURRENT
0581     larl    %r13,cleanup_critical   # %r13 base for BASED() literals
0582     lmg %r8,%r9,__LC_IO_OLD_PSW
0583     SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
0584     stmg    %r0,%r7,__PT_R0(%r11)
0585     mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
0586     stmg    %r8,%r9,__PT_PSW(%r11)
0587     mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
0588     xc  __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
0589     TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
0590     jo  .Lio_restore
0591     TRACE_IRQS_OFF
0592     xc  __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
# On LPAR, poll for further pending I/O interrupts with tpi and handle
# them in the same activation instead of taking a fresh interrupt each.
0593 .Lio_loop:
0594     lgr %r2,%r11        # pass pointer to pt_regs
0595     lghi    %r3,IO_INTERRUPT
0596     tm  __PT_INT_CODE+8(%r11),0x80  # adapter interrupt ?
0597     jz  .Lio_call
0598     lghi    %r3,THIN_INTERRUPT
0599 .Lio_call:
0600     brasl   %r14,do_IRQ
0601     TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
0602     jz  .Lio_return
0603     tpi 0
0604     jz  .Lio_return
0605     mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
0606     j   .Lio_loop
0607 .Lio_return:
0608     LOCKDEP_SYS_EXIT
0609     TRACE_IRQS_ON
0610 .Lio_tif:
0611     TSTMSK  __TI_flags(%r12),_TIF_WORK
0612     jnz .Lio_work       # there is work to do (signals etc.)
0613     TSTMSK  __LC_CPU_FLAGS,_CIF_WORK
0614     jnz .Lio_work
0615 .Lio_restore:
0616     lg  %r14,__LC_VDSO_PER_CPU
0617     lmg %r0,%r10,__PT_R0(%r11)
0618     mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
0619     stpt    __LC_EXIT_TIMER
0620     mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0621     lmg %r11,%r15,__PT_R11(%r11)
0622     lpswe   __LC_RETURN_PSW
0623 .Lio_done:
0624 
0625 #
0626 # There is work todo, find out in which context we have been interrupted:
0627 # 1) if we return to user space we can do all _TIF_WORK work
0628 # 2) if we return to kernel code and kvm is enabled check if we need to
0629 #    modify the psw to leave SIE
0630 # 3) if we return to kernel code and preemptive scheduling is enabled check
0631 #    the preemption counter and if it is zero call preempt_schedule_irq
0632 # Before any work can be done, a switch to the kernel stack is required.
0633 #
0634 .Lio_work:
0635     tm  __PT_PSW+1(%r11),0x01   # returning to user ?
0636     jo  .Lio_work_user      # yes -> do resched & signal
0637 #ifdef CONFIG_PREEMPT
0638     # check for preemptive scheduling
0639     icm %r0,15,__LC_PREEMPT_COUNT
0640     jnz .Lio_restore        # preemption is disabled
0641     TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
0642     jno .Lio_restore
0643     # switch to kernel stack
0644     lg  %r1,__PT_R15(%r11)
0645     aghi    %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
0646     mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
0647     xc  __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
0648     la  %r11,STACK_FRAME_OVERHEAD(%r1)
0649     lgr %r15,%r1
0650     # TRACE_IRQS_ON already done at .Lio_return, call
0651     # TRACE_IRQS_OFF to keep things symmetrical
0652     TRACE_IRQS_OFF
0653     brasl   %r14,preempt_schedule_irq
0654     j   .Lio_return
0655 #else
0656     j   .Lio_restore
0657 #endif
0658 
0659 #
0660 # Need to do work before returning to userspace, switch to kernel stack
0661 #
0662 .Lio_work_user:
0663     lg  %r1,__LC_KERNEL_STACK
0664     mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
0665     xc  __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
0666     la  %r11,STACK_FRAME_OVERHEAD(%r1)
0667     lgr %r15,%r1
0668 
0669 #
0670 # One of the work bits is on. Find out which one.
0671 #
# Same priority scheme as .Lsysc_work: mcck > resched > signals >
# notify-resume > FPU restore > ASCE reload.
0672 .Lio_work_tif:
0673     TSTMSK  __LC_CPU_FLAGS,_CIF_MCCK_PENDING
0674     jo  .Lio_mcck_pending
0675     TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
0676     jo  .Lio_reschedule
0677     TSTMSK  __TI_flags(%r12),_TIF_SIGPENDING
0678     jo  .Lio_sigpending
0679     TSTMSK  __TI_flags(%r12),_TIF_NOTIFY_RESUME
0680     jo  .Lio_notify_resume
0681     TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
0682     jo  .Lio_vxrs
0683     TSTMSK  __LC_CPU_FLAGS,_CIF_ASCE
0684     jo  .Lio_uaccess
0685     j   .Lio_return     # beware of critical section cleanup
0686 
0687 #
0688 # _CIF_MCCK_PENDING is set, call handler
0689 #
0690 .Lio_mcck_pending:
0691     # TRACE_IRQS_ON already done at .Lio_return
0692     brasl   %r14,s390_handle_mcck   # TIF bit will be cleared by handler
0693     TRACE_IRQS_OFF
0694     j   .Lio_return
0695 
0696 #
0697 # _CIF_ASCE is set, load user space asce
0698 #
0699 .Lio_uaccess:
0700     ni  __LC_CPU_FLAGS+7,255-_CIF_ASCE
0701     lctlg   %c1,%c1,__LC_USER_ASCE      # load primary asce
0702     j   .Lio_return
0703 
0704 #
0705 # CIF_FPU is set, restore floating-point controls and floating-point registers.
0706 #
0707 .Lio_vxrs:
0708     larl    %r14,.Lio_return
0709     jg  load_fpu_regs
0710 
0711 #
0712 # _TIF_NEED_RESCHED is set, call schedule
0713 #
0714 .Lio_reschedule:
0715     # TRACE_IRQS_ON already done at .Lio_return
0716     ssm __LC_SVC_NEW_PSW    # reenable interrupts
0717     brasl   %r14,schedule       # call scheduler
0718     ssm __LC_PGM_NEW_PSW    # disable I/O and ext. interrupts
0719     TRACE_IRQS_OFF
0720     j   .Lio_return
0721 
0722 #
0723 # _TIF_SIGPENDING or is set, call do_signal
0724 #
0725 .Lio_sigpending:
0726     # TRACE_IRQS_ON already done at .Lio_return
0727     ssm __LC_SVC_NEW_PSW    # reenable interrupts
0728     lgr %r2,%r11        # pass pointer to pt_regs
0729     brasl   %r14,do_signal
0730     ssm __LC_PGM_NEW_PSW    # disable I/O and ext. interrupts
0731     TRACE_IRQS_OFF
0732     j   .Lio_return
0733 
0734 #
0735 # _TIF_NOTIFY_RESUME or is set, call do_notify_resume
0736 #
0737 .Lio_notify_resume:
0738     # TRACE_IRQS_ON already done at .Lio_return
0739     ssm __LC_SVC_NEW_PSW    # reenable interrupts
0740     lgr %r2,%r11        # pass pointer to pt_regs
0741     brasl   %r14,do_notify_resume
0742     ssm __LC_PGM_NEW_PSW    # disable I/O and ext. interrupts
0743     TRACE_IRQS_OFF
0744     j   .Lio_return
0745 
0746 /*
0747  * External interrupt handler routine
0748  */
# Mirrors io_int_handler's entry sequence and exits through the common
# .Lio_return path.
0749 ENTRY(ext_int_handler)
0750     STCK    __LC_INT_CLOCK
0751     stpt    __LC_ASYNC_ENTER_TIMER
0752     stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
0753     lg  %r10,__LC_LAST_BREAK
0754     lg  %r12,__LC_CURRENT
0755     larl    %r13,cleanup_critical   # %r13 base for BASED() literals
0756     lmg %r8,%r9,__LC_EXT_OLD_PSW
0757     SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
0758     stmg    %r0,%r7,__PT_R0(%r11)
0759     mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
0760     stmg    %r8,%r9,__PT_PSW(%r11)
0761     lghi    %r1,__LC_EXT_PARAMS2
0762     mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
0763     mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
0764     mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
0765     xc  __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
0766     TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
0767     jo  .Lio_restore
0768     TRACE_IRQS_OFF
0769     xc  __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
0770     lgr %r2,%r11        # pass pointer to pt_regs
0771     lghi    %r3,EXT_INTERRUPT
0772     brasl   %r14,do_IRQ
0773     j   .Lio_return
0774 
0775 /*
0776  * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
0777  */
# In: %r2 -> idle data area (clock/timer enter fields), %r3 = idle PSW.
# __SF_EMPTY+8 records the resume address used by the cleanup code.
0778 ENTRY(psw_idle)
0779     stg %r3,__SF_EMPTY(%r15)
0780     larl    %r1,.Lpsw_idle_lpsw+4
0781     stg %r1,__SF_EMPTY+8(%r15)
0782 #ifdef CONFIG_SMP
0783     larl    %r1,smp_cpu_mtid
0784     llgf    %r1,0(%r1)
0785     ltgr    %r1,%r1
0786     jz  .Lpsw_idle_stcctm   # no multi-threading -> skip counter store
     # NOTE(review): raw opcode 0xeb...17 appears to be stcctm (store CPU
     # counter multiple) into __SF_EMPTY+16 — confirm against the PoP.
0787     .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
0788 .Lpsw_idle_stcctm:
0789 #endif
0790     oi  __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
0791     STCK    __CLOCK_IDLE_ENTER(%r2)
0792     stpt    __TIMER_IDLE_ENTER(%r2)
0793 .Lpsw_idle_lpsw:
0794     lpswe   __SF_EMPTY(%r15)
0795     br  %r14
0796 .Lpsw_idle_end:
0797 
0798 /*
0799  * Store floating-point controls and floating-point or vector register
0800  * depending whether the vector facility is available.  A critical section
0801  * cleanup assures that the registers are stored even if interrupted for
0802  * some other work.  The CIF_FPU flag is set to trigger a lazy restore
0803  * of the register contents at return from io or a system call.
0804  */
# Clobbers %r2/%r3; returns early (bor) if CIF_FPU is already set,
# i.e. the registers were saved before.
0805 ENTRY(save_fpu_regs)
0806     lg  %r2,__LC_CURRENT
0807     aghi    %r2,__TASK_thread
0808     TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
0809     bor %r14            # already saved -> return
0810     stfpc   __THREAD_FPU_fpc(%r2)
0811     lg  %r3,__THREAD_FPU_regs(%r2)
0812     TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
0813     jz  .Lsave_fpu_regs_fp    # no -> store FP regs
0814     VSTM    %v0,%v15,0,%r3        # vstm 0,15,0(3)
0815     VSTM    %v16,%v31,256,%r3     # vstm 16,31,256(3)
0816     j   .Lsave_fpu_regs_done      # -> set CIF_FPU flag
0817 .Lsave_fpu_regs_fp:
0818     std 0,0(%r3)
0819     std 1,8(%r3)
0820     std 2,16(%r3)
0821     std 3,24(%r3)
0822     std 4,32(%r3)
0823     std 5,40(%r3)
0824     std 6,48(%r3)
0825     std 7,56(%r3)
0826     std 8,64(%r3)
0827     std 9,72(%r3)
0828     std 10,80(%r3)
0829     std 11,88(%r3)
0830     std 12,96(%r3)
0831     std 13,104(%r3)
0832     std 14,112(%r3)
0833     std 15,120(%r3)
0834 .Lsave_fpu_regs_done:
0835     oi  __LC_CPU_FLAGS+7,_CIF_FPU
0836     br  %r14
0837 .Lsave_fpu_regs_end:
0838 #if IS_ENABLED(CONFIG_KVM)
0839 EXPORT_SYMBOL(save_fpu_regs)
0840 #endif
0841 
0842 /*
0843  * Load floating-point controls and floating-point or vector registers.
0844  * A critical section cleanup assures that the register contents are
0845  * loaded even if interrupted for some other work.
0846  *
0847  * There are special calling conventions to fit into sysc and io return work:
0848  *  %r15:   <kernel stack>
0849  * The function requires:
0850  *  %r4
0851  */
# Returns early (bnor) when CIF_FPU is clear, i.e. the registers already
# hold the current task's FPU state; clears CIF_FPU after a reload.
0852 load_fpu_regs:
0853     lg  %r4,__LC_CURRENT
0854     aghi    %r4,__TASK_thread
0855     TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
0856     bnor    %r14            # nothing to restore -> return
0857     lfpc    __THREAD_FPU_fpc(%r4)
0858     TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
0859     lg  %r4,__THREAD_FPU_regs(%r4)  # %r4 <- reg save area
0860     jz  .Lload_fpu_regs_fp      # -> no VX, load FP regs
0861     VLM %v0,%v15,0,%r4
0862     VLM %v16,%v31,256,%r4
0863     j   .Lload_fpu_regs_done
0864 .Lload_fpu_regs_fp:
0865     ld  0,0(%r4)
0866     ld  1,8(%r4)
0867     ld  2,16(%r4)
0868     ld  3,24(%r4)
0869     ld  4,32(%r4)
0870     ld  5,40(%r4)
0871     ld  6,48(%r4)
0872     ld  7,56(%r4)
0873     ld  8,64(%r4)
0874     ld  9,72(%r4)
0875     ld  10,80(%r4)
0876     ld  11,88(%r4)
0877     ld  12,96(%r4)
0878     ld  13,104(%r4)
0879     ld  14,112(%r4)
0880     ld  15,120(%r4)
0881 .Lload_fpu_regs_done:
0882     ni  __LC_CPU_FLAGS+7,255-_CIF_FPU
0883     br  %r14
0884 .Lload_fpu_regs_end:
0885 
# End of the critical-section window started at .L__critical_start.
0886 .L__critical_end:
0887 
0888 /*
0889  * Machine check handler routines
0890  */
0891 ENTRY(mcck_int_handler)
0892     STCK    __LC_MCCK_CLOCK
0893     la  %r1,4095        # revalidate r1
0894     spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1)  # revalidate cpu timer
0895     lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
0896     lg  %r10,__LC_LAST_BREAK
0897     lg  %r12,__LC_CURRENT
0898     larl    %r13,cleanup_critical   # %r13 base for BASED() literals
0899     lmg %r8,%r9,__LC_MCK_OLD_PSW
0900     TSTMSK  __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
0901     jo  .Lmcck_panic        # yes -> rest of mcck code invalid
0902     lghi    %r14,__LC_CPU_TIMER_SAVE_AREA
0903     mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
0904     TSTMSK  __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
0905     jo  3f
     # CPU timer invalid: the clc/jl cascade below walks %r14 to the
     # highest of the saved timer values and restores the timer from it.
0906     la  %r14,__LC_SYNC_ENTER_TIMER
0907     clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
0908     jl  0f
0909     la  %r14,__LC_ASYNC_ENTER_TIMER
0910 0:  clc 0(8,%r14),__LC_EXIT_TIMER
0911     jl  1f
0912     la  %r14,__LC_EXIT_TIMER
0913 1:  clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
0914     jl  2f
0915     la  %r14,__LC_LAST_UPDATE_TIMER
0916 2:  spt 0(%r14)
0917     mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
0918 3:  TSTMSK  __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
0919     jno .Lmcck_panic        # no -> skip cleanup critical
0920     SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
0921 .Lmcck_skip:
0922     lghi    %r14,__LC_GPREGS_SAVE_AREA+64
0923     stmg    %r0,%r7,__PT_R0(%r11)
0924     mvc __PT_R8(64,%r11),0(%r14)
0925     stmg    %r8,%r9,__PT_PSW(%r11)
0926     xc  __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
0927     xc  __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
0928     lgr %r2,%r11        # pass pointer to pt_regs
0929     brasl   %r14,s390_do_machine_check
0930     tm  __PT_PSW+1(%r11),0x01   # returning to user ?
0931     jno .Lmcck_return
0932     lg  %r1,__LC_KERNEL_STACK   # switch to kernel stack
0933     mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
0934     xc  __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
0935     la  %r11,STACK_FRAME_OVERHEAD(%r1)
0936     lgr %r15,%r1
0937     ssm __LC_PGM_NEW_PSW    # turn dat on, keep irqs off
0938     TSTMSK  __LC_CPU_FLAGS,_CIF_MCCK_PENDING
0939     jno .Lmcck_return
0940     TRACE_IRQS_OFF
0941     brasl   %r14,s390_handle_mcck
0942     TRACE_IRQS_ON
0943 .Lmcck_return:
0944     lg  %r14,__LC_VDSO_PER_CPU
0945     lmg %r0,%r10,__PT_R0(%r11)
0946     mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
0947     tm  __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
0948     jno 0f
0949     stpt    __LC_EXIT_TIMER
0950     mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0951 0:  lmg %r11,%r15,__PT_R11(%r11)
0952     lpswe   __LC_RETURN_MCCK_PSW
0953 
0954 .Lmcck_panic:
0955     lg  %r15,__LC_PANIC_STACK
0956     la  %r11,STACK_FRAME_OVERHEAD(%r15)
0957     j   .Lmcck_skip
0958 
0959 #
0960 # PSW restart interrupt handler
0961 #
# Entered via the restart new PSW.  Builds a pt_regs area on the restart
# stack, calls the function stored in the lowcore restart parameters
# (__LC_RESTART_FN/_DATA/_SOURCE) and finally stops this cpu.
0962 ENTRY(restart_int_handler)
0963     TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
0964     jz  0f
0965     .insn   s,0xb2800000,__LC_LPP   # opcode 0xb280: presumably LPP
                        # (load program parameter) -- confirm
0966 0:  stg %r15,__LC_SAVE_AREA_RESTART # save old stack pointer
0967     lg  %r15,__LC_RESTART_STACK # switch to restart stack
0968     aghi    %r15,-__PT_SIZE         # create pt_regs on stack
0969     xc  0(__PT_SIZE,%r15),0(%r15)   # zero the pt_regs area
0970     stmg    %r0,%r14,__PT_R0(%r15)  # save gprs 0-14
0971     mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART # saved old %r15
0972     mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
0973     aghi    %r15,-STACK_FRAME_OVERHEAD  # create stack frame on stack
0974     xc  0(STACK_FRAME_OVERHEAD,%r15),0(%r15)    # zero the frame
0975     lg  %r1,__LC_RESTART_FN     # load fn, parm & source cpu
0976     lg  %r2,__LC_RESTART_DATA
0977     lg  %r3,__LC_RESTART_SOURCE
0978     ltgr    %r3,%r3             # test source cpu address
0979     jm  1f              # negative -> skip source stop
0980 0:  sigp    %r4,%r3,SIGP_SENSE      # sigp sense to source cpu
0981     brc 10,0b               # wait for status stored
0982 1:  basr    %r14,%r1            # call function
0983     stap    __SF_EMPTY(%r15)        # store cpu address
0984     llgh    %r3,__SF_EMPTY(%r15)    # %r3 = this cpu's address
0985 2:  sigp    %r4,%r3,SIGP_STOP       # sigp stop to current cpu
0986     brc 2,2b                # cc 2 (busy) -> retry
0987 3:  j   3b              # never reached once stopped
0988 
0989     .section .kprobes.text, "ax"
0990 
0991 #ifdef CONFIG_CHECK_STACK
0992 /*
0993  * The synchronous or the asynchronous stack overflowed. We are dead.
0994  * No need to properly save the registers, we are going to panic anyway.
0995  * Setup a pt_regs so that show_trace can provide a good call trace.
0996  */
0997 stack_overflow:
0998     lg  %r15,__LC_PANIC_STACK   # change to panic stack
0999     la  %r11,STACK_FRAME_OVERHEAD(%r15) # %r11 = pt_regs on panic stack
1000     stmg    %r0,%r7,__PT_R0(%r11)   # gprs 0-7
1001     stmg    %r8,%r9,__PT_PSW(%r11)  # interrupted PSW
# NOTE(review): %r14 is expected to point at a save area holding the
# original %r8-%r15, set up by whoever branched here -- confirm against
# the CHECK_STACK macro (not visible in this chunk).
1002     mvc __PT_R8(64,%r11),0(%r14)    # gprs 8-15 from save area
1003     stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
1004     xc  __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # end backchain
1005     lgr %r2,%r11        # pass pointer to pt_regs
1006     jg  kernel_stack_overflow   # tail-branch; does not return
1007 #endif
1008 
# cleanup_critical - fix up an interrupt that hit one of the critical
# code regions bracketed by the address pairs in .Lcleanup_table (and,
# with KVM, .Lcleanup_table_sie).
#
# In:  %r9  = PSW address of the interrupted instruction
#      %r13 = base register for BASED() (points at cleanup_critical)
#      %r14 = return address
# Out: %r9  = adjusted PSW address to resume at
# Each clg/jl pair narrows %r9 down to one region; a hit dispatches to
# the matching .Lcleanup_* fixup routine, otherwise %r9 is unchanged.
1009 cleanup_critical:
1010 #if IS_ENABLED(CONFIG_KVM)
1011     clg %r9,BASED(.Lcleanup_table_sie)  # .Lsie_gmap
1012     jl  0f
1013     clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
1014     jl  .Lcleanup_sie
1015 #endif
1016     clg %r9,BASED(.Lcleanup_table)  # system_call
1017     jl  0f
1018     clg %r9,BASED(.Lcleanup_table+8)    # .Lsysc_do_svc
1019     jl  .Lcleanup_system_call
1020     clg %r9,BASED(.Lcleanup_table+16)   # .Lsysc_tif
1021     jl  0f
1022     clg %r9,BASED(.Lcleanup_table+24)   # .Lsysc_restore
1023     jl  .Lcleanup_sysc_tif
1024     clg %r9,BASED(.Lcleanup_table+32)   # .Lsysc_done
1025     jl  .Lcleanup_sysc_restore
1026     clg %r9,BASED(.Lcleanup_table+40)   # .Lio_tif
1027     jl  0f
1028     clg %r9,BASED(.Lcleanup_table+48)   # .Lio_restore
1029     jl  .Lcleanup_io_tif
1030     clg %r9,BASED(.Lcleanup_table+56)   # .Lio_done
1031     jl  .Lcleanup_io_restore
1032     clg %r9,BASED(.Lcleanup_table+64)   # psw_idle
1033     jl  0f
1034     clg %r9,BASED(.Lcleanup_table+72)   # .Lpsw_idle_end
1035     jl  .Lcleanup_idle
1036     clg %r9,BASED(.Lcleanup_table+80)   # save_fpu_regs
1037     jl  0f
1038     clg %r9,BASED(.Lcleanup_table+88)   # .Lsave_fpu_regs_end
1039     jl  .Lcleanup_save_fpu_regs
1040     clg %r9,BASED(.Lcleanup_table+96)   # load_fpu_regs
1041     jl  0f
1042     clg %r9,BASED(.Lcleanup_table+104)  # .Lload_fpu_regs_end
1043     jl  .Lcleanup_load_fpu_regs
1044 0:  br  %r14            # not in a critical region
1045 
1046     .align  8
# Boundary addresses for the critical regions handled by
# cleanup_critical.  Consecutive entries bracket one region; the entry
# order must match the compare sequence in cleanup_critical.
1047 .Lcleanup_table:
1048     .quad   system_call
1049     .quad   .Lsysc_do_svc
1050     .quad   .Lsysc_tif
1051     .quad   .Lsysc_restore
1052     .quad   .Lsysc_done
1053     .quad   .Lio_tif
1054     .quad   .Lio_restore
1055     .quad   .Lio_done
1056     .quad   psw_idle
1057     .quad   .Lpsw_idle_end
1058     .quad   save_fpu_regs
1059     .quad   .Lsave_fpu_regs_end
1060     .quad   load_fpu_regs
1061     .quad   .Lload_fpu_regs_end
1062 
1062 
1063 #if IS_ENABLED(CONFIG_KVM)
# Critical region boundaries for the KVM SIE path (checked first by
# cleanup_critical).
1064 .Lcleanup_table_sie:
1065     .quad   .Lsie_gmap
1066     .quad   .Lsie_done
1067 
# Fixup for an interrupt inside the SIE critical region: mark the SIE
# control block as no longer executing, restore the primary asce and
# resume at sie_exit.
1068 .Lcleanup_sie:
1069     lg  %r9,__SF_EMPTY(%r15)        # get control block pointer
1070     ni  __SIE_PROG0C+3(%r9),0xfe    # no longer in SIE
1071     lctlg   %c1,%c1,__LC_USER_ASCE      # load primary asce
1072     larl    %r9,sie_exit            # skip forward to sie_exit
1073     br  %r14
1074 #endif
1075 
# Fixup for an interrupt inside the system_call entry sequence.  %r9
# (the interrupted PSW address) is compared against the progress markers
# in .Lcleanup_system_call_insn to decide which entry steps still have
# to be replayed; on exit %r9 points at .Lsysc_do_svc so the entry can
# be resumed past the replayed part.
# NOTE(review): %r11 presumably points at the save area used by the
# interrupted entry path (offsets 16/24/32/56 = saved r10/r11/r12/r15)
# -- confirm against the system_call entry code, not in this chunk.
1076 .Lcleanup_system_call:
1077     # check if stpt has been executed
1078     clg %r9,BASED(.Lcleanup_system_call_insn)
1079     jh  0f
1080     mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
1081     cghi    %r11,__LC_SAVE_AREA_ASYNC   # interrupted by mcck instead?
1082     je  0f
1083     mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
1084 0:  # check if stmg has been executed
1085     clg %r9,BASED(.Lcleanup_system_call_insn+8)
1086     jh  0f
1087     mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
1088 0:  # check if base register setup + TIF bit load has been done
1089     clg %r9,BASED(.Lcleanup_system_call_insn+16)
1090     jhe 0f
1091     # set up saved registers r10 and r12
1092     stg %r10,16(%r11)       # r10 last break
1093     stg %r12,32(%r11)       # r12 task struct pointer
1094 0:  # check if the user time update has been done
1095     clg %r9,BASED(.Lcleanup_system_call_insn+24)
1096     jh  0f
1097     lg  %r15,__LC_EXIT_TIMER
1098     slg %r15,__LC_SYNC_ENTER_TIMER  # user time = exit - sync enter
1099     alg %r15,__LC_USER_TIMER
1100     stg %r15,__LC_USER_TIMER
1101 0:  # check if the system time update has been done
1102     clg %r9,BASED(.Lcleanup_system_call_insn+32)
1103     jh  0f
1104     lg  %r15,__LC_LAST_UPDATE_TIMER
1105     slg %r15,__LC_EXIT_TIMER    # system time = last update - exit
1106     alg %r15,__LC_SYSTEM_TIMER
1107     stg %r15,__LC_SYSTEM_TIMER
1108 0:  # update accounting time stamp
1109     mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
1110     # do LAST_BREAK
1111     lg  %r9,16(%r11)        # saved last breaking event address
1112     srag    %r9,%r9,23      # ignore addresses below 8MB
1113     jz  0f
1114     lgr %r9,%r12
1115     aghi    %r9,__TASK_thread
1116     mvc __THREAD_last_break(8,%r9),16(%r11)
1117 0:  # set up saved register r11
1118     lg  %r15,__LC_KERNEL_STACK
1119     la  %r9,STACK_FRAME_OVERHEAD(%r15)
1120     stg %r9,24(%r11)        # r11 pt_regs pointer
1121     # fill pt_regs
1122     mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
1123     stmg    %r0,%r7,__PT_R0(%r9)
1124     mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
1125     mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
1126     xc  __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
1127     mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
1128     # setup saved register r15
1129     stg %r15,56(%r11)       # r15 stack pointer
1130     # set new psw address and exit
1131     larl    %r9,.Lsysc_do_svc
1132     br  %r14
# Progress markers within the system_call entry path, in execution
# order; indexed by the compares above.
1133 .Lcleanup_system_call_insn:
1134     .quad   system_call
1135     .quad   .Lsysc_stmg
1136     .quad   .Lsysc_per
1137     .quad   .Lsysc_vtime+36
1138     .quad   .Lsysc_vtime+42
1139 
1139 
# Interrupt between .Lsysc_tif and .Lsysc_restore: simply restart the
# TIF work check from the beginning.
1140 .Lcleanup_sysc_tif:
1141     larl    %r9,.Lsysc_tif      # resume at .Lsysc_tif
1142     br  %r14
1143 
1143 
# Interrupt inside the syscall exit register-restore sequence.  If it
# hit exactly the final instruction (.Lsysc_done - 4) the user registers
# are already loaded; otherwise redo the restore from the saved pt_regs.
1144 .Lcleanup_sysc_restore:
1145     clg %r9,BASED(.Lcleanup_sysc_restore_insn)
1146     je  0f              # at the lpswe itself -> regs done
1147     lg  %r9,24(%r11)        # get saved pointer to pt_regs
1148     mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1149     mvc 0(64,%r11),__PT_R8(%r9) # gprs 8-15 back to save area
1150     lmg %r0,%r7,__PT_R0(%r9)
1151 0:  lmg %r8,%r9,__LC_RETURN_PSW # resume at the final lpswe
1152     br  %r14
1153 .Lcleanup_sysc_restore_insn:
1154     .quad   .Lsysc_done - 4
1155 
1155 
# Interrupt between .Lio_tif and .Lio_restore: restart the TIF work
# check of the io exit path from the beginning.
1156 .Lcleanup_io_tif:
1157     larl    %r9,.Lio_tif        # resume at .Lio_tif
1158     br  %r14
1159 
1159 
# Interrupt inside the io exit register-restore sequence; same scheme
# as .Lcleanup_sysc_restore but for the io path.
1160 .Lcleanup_io_restore:
1161     clg %r9,BASED(.Lcleanup_io_restore_insn)
1162     je  0f              # at the lpswe itself -> regs done
1163     lg  %r9,24(%r11)        # get saved r11 pointer to pt_regs
1164     mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1165     mvc 0(64,%r11),__PT_R8(%r9) # gprs 8-15 back to save area
1166     lmg %r0,%r7,__PT_R0(%r9)
1167 0:  lmg %r8,%r9,__LC_RETURN_PSW # resume at the final lpswe
1168     br  %r14
1169 .Lcleanup_io_restore_insn:
1170     .quad   .Lio_done - 4
1171 
1171 
# Interrupt inside psw_idle: finish the idle bookkeeping (clock/timer
# stamps, idle cycle and steal/system time accounting) on behalf of the
# interrupted code and return to psw_idle's caller.
# NOTE(review): %r2 presumably still holds psw_idle's pointer argument
# to the per-cpu idle data (the __CLOCK_IDLE_*/__TIMER_IDLE_* fields)
# -- confirm against psw_idle, which is outside this chunk.
1172 .Lcleanup_idle:
1173     ni  __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT  # leave enabled wait
1174     # copy interrupt clock & cpu timer
1175     mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
1176     mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
1177     cghi    %r11,__LC_SAVE_AREA_ASYNC   # interrupted by mcck instead?
1178     je  0f
1179     mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
1180     mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
1181 0:  # check if stck & stpt have been executed
1182     clg %r9,BASED(.Lcleanup_idle_insn)
1183     jhe 1f
1184     mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
1185     mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1186 1:  # calculate idle cycles
1187 #ifdef CONFIG_SMP
1188     clg %r9,BASED(.Lcleanup_idle_insn)
1189     jl  3f              # lpsw not reached -> no cycles yet
1190     larl    %r1,smp_cpu_mtid
1191     llgf    %r1,0(%r1)      # %r1 = number of extra mt counters
1192     ltgr    %r1,%r1
1193     jz  3f              # no multithreading -> skip
1194     .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
                        # opcode 0xeb..17: presumably STCCTM
                        # (store cpu counter multiple) -- confirm
1195     larl    %r3,mt_cycles
1196     ag  %r3,__LC_PERCPU_OFFSET  # %r3 = this cpu's mt_cycles array
1197     la  %r4,__SF_EMPTY+16(%r15) # %r4 = counters stored at idle entry
1198 2:  lg  %r0,0(%r3)
1199     slg %r0,0(%r4)      # subtract entry counter value
1200     alg %r0,64(%r4)     # add exit counter value
1201     stg %r0,0(%r3)      # accumulate into mt_cycles
1202     la  %r3,8(%r3)
1203     la  %r4,8(%r4)
1204     brct    %r1,2b
1205 #endif
1206 3:  # account system time going idle
1207     lg  %r9,__LC_STEAL_TIMER
1208     alg %r9,__CLOCK_IDLE_ENTER(%r2)
1209     slg %r9,__LC_LAST_UPDATE_CLOCK
1210     stg %r9,__LC_STEAL_TIMER
1211     mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
1212     lg  %r9,__LC_SYSTEM_TIMER
1213     alg %r9,__LC_LAST_UPDATE_TIMER
1214     slg %r9,__TIMER_IDLE_ENTER(%r2)
1215     stg %r9,__LC_SYSTEM_TIMER
1216     mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
1217     # prepare return psw
1218     nihh    %r8,0xfcfd      # clear irq & wait state bits
1219     lg  %r9,48(%r11)        # return from psw_idle
1220     br  %r14
1221 .Lcleanup_idle_insn:
1222     .quad   .Lpsw_idle_lpsw
1223 
1223 
# Interrupt inside save_fpu_regs: the routine is restartable, so just
# resume it from the beginning.
1224 .Lcleanup_save_fpu_regs:
1225     larl    %r9,save_fpu_regs   # restart save_fpu_regs
1226     br  %r14
1227 
1227 
# Interrupt inside load_fpu_regs: the routine is restartable, so just
# resume it from the beginning.
1228 .Lcleanup_load_fpu_regs:
1229     larl    %r9,load_fpu_regs   # restart load_fpu_regs
1230     br  %r14
1231 
1231 
1232 /*
1233  * Integer constants
1234  */
# Start address and length of the critical section; used by interrupt
# handlers to decide whether cleanup_critical has to be invoked.  The
# SIE pair covers the guest-execution critical region separately.
1235     .align  8
1236 .Lcritical_start:
1237     .quad   .L__critical_start
1238 .Lcritical_length:
1239     .quad   .L__critical_end - .L__critical_start
1240 #if IS_ENABLED(CONFIG_KVM)
1241 .Lsie_critical_start:
1242     .quad   .Lsie_gmap
1243 .Lsie_critical_length:
1244     .quad   .Lsie_done - .Lsie_gmap
1245 #endif
1246 
1246 
1247     .section .rodata, "a"
# System call tables, generated from syscalls.S by redefining the
# SYSCALL() macro: one 32-bit entry per syscall (the 64-bit native
# handler here, the compat handler below).
1248 #define SYSCALL(esame,emu)  .long esame
1249     .globl  sys_call_table
1250 sys_call_table:
1251 #include "syscalls.S"
1252 #undef SYSCALL
1253 
1254 #ifdef CONFIG_COMPAT
1255 
# 31-bit compat syscall table: same layout, emulation entry points.
1256 #define SYSCALL(esame,emu)  .long emu
1257     .globl  sys_call_table_emu
1258 sys_call_table_emu:
1259 #include "syscalls.S"
1260 #undef SYSCALL
1261 #endif