/* SPDX-License-Identifier: GPL-2.0 */
/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

    .text
    .align  32

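/* CPU cross-call (mondo) handler: pull one 64-byte entry off this
 * cpu's mondo queue, count it, pick up the handler PC and arguments,
 * advance the queue head and jump straight into the handler.
 */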
sun4v_cpu_mondo:
    /* Head offset in %g2, tail offset in %g4.
     * If they are the same, no work.
     */
    mov INTRQ_CPU_MONDO_HEAD, %g2
    ldxa    [%g2] ASI_QUEUE, %g2
    mov INTRQ_CPU_MONDO_TAIL, %g4
    ldxa    [%g4] ASI_QUEUE, %g4
    cmp %g2, %g4
    be,pn   %xcc, sun4v_cpu_mondo_queue_empty
     nop

    /* Get &trap_block[smp_processor_id()] into %g4.  */
    ldxa    [%g0] ASI_SCRATCHPAD, %g4
    sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
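    /* The scratchpad register points at this cpu's fault status area,
     * which lives TRAP_PER_CPU_FAULT_INFO bytes into its trap_block
     * entry, hence the subtraction above.
     */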

    /* Get smp_processor_id() into %g3 */
    sethi   %hi(trap_block), %g5
    or  %g5, %lo(trap_block), %g5
    sub %g4, %g5, %g3
    srlx    %g3, TRAP_BLOCK_SZ_SHIFT, %g3
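    /* trap_block[] entries are a power-of-two size, so the cpu id is
     * simply (this entry - trap_block) >> TRAP_BLOCK_SZ_SHIFT.
     */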

    /* Increment cpu_mondo_counter[smp_processor_id()] */
    sethi   %hi(cpu_mondo_counter), %g5
    or  %g5, %lo(cpu_mondo_counter), %g5
    sllx    %g3, 3, %g3
    add %g5, %g3, %g5
    ldx [%g5], %g3
    add %g3, 1, %g3
    stx %g3, [%g5]
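    /* Each cpu indexes the counter array with its own id (the sllx by
     * 3 turns it into a byte offset into an array of 64-bit slots), so
     * a plain load/increment/store suffices here.
     */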

    /* Get CPU mondo queue base phys address into %g7.  */
    ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

    /* Now get the cross-call arguments and handler PC, same
     * layout as sun4u:
     *
     * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
     *                  high half is context arg to MMU flushes, put into %g5
     * 2nd 64-bit word: 64-bit arg, load into %g1
     * 3rd 64-bit word: 64-bit arg, load into %g7
     */
    ldxa    [%g7 + %g2] ASI_PHYS_USE_EC, %g3
    add %g2, 0x8, %g2
    srlx    %g3, 32, %g5
    ldxa    [%g7 + %g2] ASI_PHYS_USE_EC, %g1
    add %g2, 0x8, %g2
    srl %g3, 0, %g3
    ldxa    [%g7 + %g2] ASI_PHYS_USE_EC, %g7
    add %g2, 0x40 - 0x8 - 0x8, %g2
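    /* Queue entries are 64 bytes; two 8-byte steps were already taken
     * above, so this advances %g2 the remaining 0x30 bytes to the next
     * entry boundary.
     */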

    /* Update queue head pointer.  */
    lduw    [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
    and %g2, %g4, %g2
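    /* The qmask is (queue size - 1); queue sizes are powers of two, so
     * the AND wraps the new head offset back to the start of the ring.
     */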

    mov INTRQ_CPU_MONDO_HEAD, %g4
    stxa    %g2, [%g4] ASI_QUEUE
    membar  #Sync

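    /* Tail-call the cross-call handler: with %g0 as the link register
     * we never return here; the handler is expected to finish the trap
     * itself (typically via retry).
     */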
    jmpl    %g3, %g0
     nop

sun4v_cpu_mondo_queue_empty:
    retry

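/* Device interrupt mondo handler: dequeue one entry, turn it into an
 * ino_bucket (directly for VIRQ cookies, via ivector_table otherwise),
 * chain the bucket on this cpu's irq work list and raise the device
 * IRQ softint.
 */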
sun4v_dev_mondo:
    /* Head offset in %g2, tail offset in %g4.  */
    mov INTRQ_DEVICE_MONDO_HEAD, %g2
    ldxa    [%g2] ASI_QUEUE, %g2
    mov INTRQ_DEVICE_MONDO_TAIL, %g4
    ldxa    [%g4] ASI_QUEUE, %g4
    cmp %g2, %g4
    be,pn   %xcc, sun4v_dev_mondo_queue_empty
     nop

    /* Get &trap_block[smp_processor_id()] into %g4.  */
    ldxa    [%g0] ASI_SCRATCHPAD, %g4
    sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4

    /* Get DEV mondo queue base phys address into %g5.  */
    ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

    /* Load IVEC into %g3.  */
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    add %g2, 0x40, %g2

    /* XXX There can be a full 64-byte block of data here.
     * XXX This is how we can get at MSI vector data.
     * XXX Currently we do not capture this, but when we do we'll
     * XXX need to add a 64-byte storage area in the struct ino_bucket
     * XXX or the struct irq_desc.
     */

    /* Update the queue head pointer; this frees up some registers.  */
    lduw    [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
    and %g2, %g4, %g2

    mov INTRQ_DEVICE_MONDO_HEAD, %g4
    stxa    %g2, [%g4] ASI_QUEUE
    membar  #Sync

    TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

    /* For VIRQs, the cookie is encoded as ~bucket_phys_addr.  */
    brlz,pt %g3, 1f
     xnor   %g3, %g0, %g4
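    /* A negative IVEC is a cookie-based VIRQ: the cookie is the one's
     * complement of the bucket physical address, so the xnor with %g0
     * (a bitwise NOT) already yields __pa(bucket) in %g4 and the
     * ivector_table lookup below is skipped.
     */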

    /* Get __pa(&ivector_table[IVEC]) into %g4.  */
    sethi   %hi(ivector_table_pa), %g4
    ldx [%g4 + %lo(ivector_table_pa)], %g4
    sllx    %g3, 4, %g3
    add %g4, %g3, %g4

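    /* Push the bucket onto this cpu's irq work list: the old list head
     * is linked into the bucket's first word (a physical store), and
     * the bucket's physical address becomes the new head, for the
     * device IRQ handler to walk later.
     */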
1:  ldx [%g1], %g2
    stxa    %g2, [%g4] ASI_PHYS_USE_EC
    stx %g4, [%g1]

    /* Signal the interrupt by setting (1 << pil) in %softint.  */
    wr  %g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
    retry

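/* Resumable error mondo handler: copy the 64-byte error report into
 * the per-cpu kernel buffer and call into C (sun4v_resum_error) to
 * log it.
 */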
sun4v_res_mondo:
    /* Head offset in %g2, tail offset in %g4.  */
    mov INTRQ_RESUM_MONDO_HEAD, %g2
    ldxa    [%g2] ASI_QUEUE, %g2
    mov INTRQ_RESUM_MONDO_TAIL, %g4
    ldxa    [%g4] ASI_QUEUE, %g4
    cmp %g2, %g4
    be,pn   %xcc, sun4v_res_mondo_queue_empty
     nop

    /* Get &trap_block[smp_processor_id()] into %g3.  */
    ldxa    [%g0] ASI_SCRATCHPAD, %g3
    sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3

    /* Get RES mondo queue base phys address into %g5.  */
    ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

    /* Get RES kernel buffer base phys address into %g7.  */
    ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

    /* If the first word is non-zero, the queue is full.  */
    ldxa    [%g7 + %g2] ASI_PHYS_USE_EC, %g1
    brnz,pn %g1, sun4v_res_mondo_queue_full
     nop
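    /* The C handler is expected to clear the first word of a kernel
     * buffer slot once it has consumed the entry, so a non-zero word
     * here means the report at this offset is still being processed.
     */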

    lduw    [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

    /* Remember this entry's offset in %g1.  */
    mov %g2, %g1

    /* Copy 64-byte queue entry into kernel buffer.  */
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2

    /* Update queue head pointer.  */
    and %g2, %g4, %g2

    mov INTRQ_RESUM_MONDO_HEAD, %g4
    stxa    %g2, [%g4] ASI_QUEUE
    membar  #Sync

    /* Disable interrupts and save register state so we can call
     * C code.  The etrap handling will leave %g4 in %l4 for us
     * when it's done.
     */
    rdpr    %pil, %g2
    wrpr    %g0, PIL_NORMAL_MAX, %pil
    mov %g1, %g4
    ba,pt   %xcc, etrap_irq
     rd %pc, %g7
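    /* etrap_irq takes its return PC in %g7 and, once the trap frame is
     * built, resumes just past the rd above with %g4 preserved in %l4
     * (see etrap_64.S).
     */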
#ifdef CONFIG_TRACE_IRQFLAGS
    call        trace_hardirqs_off
     nop
#endif
    /* Log the event.  */
    add %sp, PTREGS_OFF, %o0
    call    sun4v_resum_error
     mov    %l4, %o1

    /* Return from trap.  */
    ba,pt   %xcc, rtrap_irq
     nop

sun4v_res_mondo_queue_empty:
    retry

sun4v_res_mondo_queue_full:
    /* The queue is full; limit the damage by setting the head equal
     * to the tail, otherwise we would just trap again immediately.
     * Call C code to log the event.
     */
    mov INTRQ_RESUM_MONDO_HEAD, %g2
    stxa    %g4, [%g2] ASI_QUEUE
    membar  #Sync

    rdpr    %pil, %g2
    wrpr    %g0, PIL_NORMAL_MAX, %pil
    ba,pt   %xcc, etrap_irq
     rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
    call        trace_hardirqs_off
     nop
#endif
    call    sun4v_resum_overflow
     add    %sp, PTREGS_OFF, %o0

    ba,pt   %xcc, rtrap_irq
     nop

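/* Non-resumable error mondo handler: same structure as the resumable
 * handler above, but the report is handed to sun4v_nonresum_error()
 * instead.
 */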
sun4v_nonres_mondo:
    /* Head offset in %g2, tail offset in %g4.  */
    mov INTRQ_NONRESUM_MONDO_HEAD, %g2
    ldxa    [%g2] ASI_QUEUE, %g2
    mov INTRQ_NONRESUM_MONDO_TAIL, %g4
    ldxa    [%g4] ASI_QUEUE, %g4
    cmp %g2, %g4
    be,pn   %xcc, sun4v_nonres_mondo_queue_empty
     nop

    /* Get &trap_block[smp_processor_id()] into %g3.  */
    ldxa    [%g0] ASI_SCRATCHPAD, %g3
    sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3

    /* Get NONRES mondo queue base phys address into %g5.  */
    ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

    /* Get NONRES kernel buffer base phys address into %g7.  */
    ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

    /* If the first word is non-zero, the queue is full.  */
    ldxa    [%g7 + %g2] ASI_PHYS_USE_EC, %g1
    brnz,pn %g1, sun4v_nonres_mondo_queue_full
     nop

    lduw    [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

    /* Remember this entry's offset in %g1.  */
    mov %g2, %g1

    /* Copy 64-byte queue entry into kernel buffer.  */
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2
    ldxa    [%g5 + %g2] ASI_PHYS_USE_EC, %g3
    stxa    %g3, [%g7 + %g2] ASI_PHYS_USE_EC
    add %g2, 0x08, %g2

    /* Update queue head pointer.  */
    and %g2, %g4, %g2

    mov INTRQ_NONRESUM_MONDO_HEAD, %g4
    stxa    %g2, [%g4] ASI_QUEUE
    membar  #Sync

    /* Disable interrupts and save register state so we can call
     * C code.  The etrap handling will leave %g4 in %l4 for us
     * when it's done.
     */
    rdpr    %pil, %g2
    wrpr    %g0, PIL_NORMAL_MAX, %pil
    mov %g1, %g4
    ba,pt   %xcc, etrap_irq
     rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
    call        trace_hardirqs_off
     nop
#endif
    /* Log the event.  */
    add %sp, PTREGS_OFF, %o0
    call    sun4v_nonresum_error
     mov    %l4, %o1

    /* Return from trap.  */
    ba,pt   %xcc, rtrap_irq
     nop

sun4v_nonres_mondo_queue_empty:
    retry

sun4v_nonres_mondo_queue_full:
    /* The queue is full; limit the damage by setting the head equal
     * to the tail, otherwise we would just trap again immediately.
     * Call C code to log the event.
     */
    mov INTRQ_NONRESUM_MONDO_HEAD, %g2
    stxa    %g4, [%g2] ASI_QUEUE
    membar  #Sync

    rdpr    %pil, %g2
    wrpr    %g0, PIL_NORMAL_MAX, %pil
    ba,pt   %xcc, etrap_irq
     rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
    call        trace_hardirqs_off
     nop
#endif
    call    sun4v_nonresum_overflow
     add    %sp, PTREGS_OFF, %o0

    ba,pt   %xcc, rtrap_irq
     nop