// SPDX-License-Identifier: GPL-2.0-or-later
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
#include <asm/io_bitmap.h>

/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     ASM_ENDBR
     ASM_RET
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");

/* stub always returning 0. */
asm (".pushsection .entry.text, \"ax\"\n"
     ".global paravirt_ret0\n"
     "paravirt_ret0:\n\t"
     ASM_ENDBR
     "xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
     ASM_RET
     ".size paravirt_ret0, . - paravirt_ret0\n\t"
     ".type paravirt_ret0, @function\n\t"
     ".popsection");

void __init default_banner(void)
{
    printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
           pv_info.name);
}

/* Undefined instruction for dealing with missing ops pointers. */
noinstr void paravirt_BUG(void)
{
    BUG();
}

static unsigned paravirt_patch_call(void *insn_buff, const void *target,
                    unsigned long addr, unsigned len)
{
    __text_gen_insn(insn_buff, CALL_INSN_OPCODE,
            (void *)addr, target, CALL_INSN_SIZE);
    return CALL_INSN_SIZE;
}

#ifdef CONFIG_PARAVIRT_XXL
/* identity function, which can be inlined */
u64 notrace _paravirt_ident_64(u64 x)
{
    return x;
}
#endif

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

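/*
 * virt_spin_lock_key starts out true; on bare metal (no CPUID
 * hypervisor bit) disable it so virt_spin_lock() keeps the native
 * queued-spinlock slowpath instead of the test-and-set fallback
 * used when running under a hypervisor.
 */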
void __init native_pv_lock_init(void)
{
    if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
        static_branch_disable(&virt_spin_lock_key);
}

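/*
 * Called from the alternatives machinery (apply_paravirt()) for each
 * recorded pv_ops call site: patch in a direct call, or return 0 for
 * _paravirt_nop so the caller pads the whole site with NOPs.
 */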
unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
                unsigned int len)
{
    /*
     * Neat trick to map patch type back to the call within the
     * corresponding structure.
     */
    void *opfunc = *((void **)&pv_ops + type);
    unsigned ret;

    if (opfunc == NULL)
        /* If there's no function, patch it with paravirt_BUG() */
        ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
    else if (opfunc == _paravirt_nop)
        ret = 0;
    else
        /* Otherwise call the function. */
        ret = paravirt_patch_call(insn_buff, opfunc, addr, len);

    return ret;
}

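/*
 * Steal time is time a vCPU spends involuntarily not running.  Bare
 * hardware cannot have time stolen from it, so the native
 * implementation reports zero; guests override the pv_steal_clock
 * static call with their hypervisor's accounting.
 */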
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
    return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void))
{
    static_call_update(pv_sched_clock, func);
}
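
/*
 * Guests install their clock callback here early in boot, e.g.
 * kvmclock does paravirt_set_sched_clock(kvm_sched_clock_read);
 * afterwards sched_clock() resolves through the pv_sched_clock
 * static call instead of the TSC-based native_sched_clock().
 */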

/* Dummy resource used to claim the whole legacy I/O port space. */
static struct resource reserve_ioports = {
    .start = 0,
    .end = IO_SPACE_LIMIT,
    .name = "paravirt-ioport",
    .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
    return request_resource(&ioport_resource, &reserve_ioports);
}

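/*
 * Lazy-mode tracking: a hypervisor may batch expensive MMU (or, across
 * a context switch, CPU state) updates while a lazy section is open
 * and commit them in a single hypercall when it is closed.
 */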
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
    BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

    this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
    BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

    this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
    enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
    leave_lazy(PARAVIRT_LAZY_MMU);
}

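/*
 * Force pending batched MMU updates out without closing the lazy
 * section: leaving and immediately re-entering lazy MMU mode makes
 * the hypervisor commit whatever has been queued so far.
 */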
void paravirt_flush_lazy_mmu(void)
{
    preempt_disable();

    if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
        arch_leave_lazy_mmu_mode();
        arch_enter_lazy_mmu_mode();
    }

    preempt_enable();
}

#ifdef CONFIG_PARAVIRT_XXL
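/*
 * A context switch must not start with batched MMU updates pending:
 * flush lazy MMU mode and set TIF_LAZY_MMU_UPDATES on the outgoing
 * task so paravirt_end_context_switch() re-enters it afterwards.
 */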
void paravirt_start_context_switch(struct task_struct *prev)
{
    BUG_ON(preemptible());

    if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
        arch_leave_lazy_mmu_mode();
        set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
    }
    enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
    BUG_ON(preemptible());

    leave_lazy(PARAVIRT_LAZY_CPU);

    if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
        arch_enter_lazy_mmu_mode();
}

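/*
 * noinstr wrappers around the native helpers: these ops can be reached
 * from early entry/exception paths where instrumentation (tracing,
 * kprobes) is not yet safe, so keep them out of its reach.
 */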
static noinstr unsigned long pv_native_read_cr2(void)
{
    return native_read_cr2();
}

static noinstr void pv_native_write_cr2(unsigned long val)
{
    native_write_cr2(val);
}

static noinstr unsigned long pv_native_get_debugreg(int regno)
{
    return native_get_debugreg(regno);
}

static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
    native_set_debugreg(regno, val);
}

static noinstr void pv_native_irq_enable(void)
{
    native_irq_enable();
}

static noinstr void pv_native_irq_disable(void)
{
    native_irq_disable();
}
#endif

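/* Interrupt handlers never run in lazy mode, so report NONE there. */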
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
    if (in_interrupt())
        return PARAVIRT_LAZY_NONE;

    return this_cpu_read(paravirt_lazy_mode);
}

struct pv_info pv_info = {
    .name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
    .extra_user_64bit_cs = __USER_CS,
#endif
};

/* 64-bit pagetable entries */
#define PTE_IDENT   __PV_IS_CALLEE_SAVE(_paravirt_ident_64)

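/*
 * Default ops table: every entry points at the native implementation
 * (or a nop).  Hypervisor guests such as Xen override entries at boot;
 * the remaining indirect call sites are then patched into direct calls
 * by paravirt_patch() above.
 */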
struct paravirt_patch_template pv_ops = {
    /* Cpu ops. */
    .cpu.io_delay       = native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
    .cpu.cpuid      = native_cpuid,
    .cpu.get_debugreg   = pv_native_get_debugreg,
    .cpu.set_debugreg   = pv_native_set_debugreg,
    .cpu.read_cr0       = native_read_cr0,
    .cpu.write_cr0      = native_write_cr0,
    .cpu.write_cr4      = native_write_cr4,
    .cpu.wbinvd     = native_wbinvd,
    .cpu.read_msr       = native_read_msr,
    .cpu.write_msr      = native_write_msr,
    .cpu.read_msr_safe  = native_read_msr_safe,
    .cpu.write_msr_safe = native_write_msr_safe,
    .cpu.read_pmc       = native_read_pmc,
    .cpu.load_tr_desc   = native_load_tr_desc,
    .cpu.set_ldt        = native_set_ldt,
    .cpu.load_gdt       = native_load_gdt,
    .cpu.load_idt       = native_load_idt,
    .cpu.store_tr       = native_store_tr,
    .cpu.load_tls       = native_load_tls,
    .cpu.load_gs_index  = native_load_gs_index,
    .cpu.write_ldt_entry    = native_write_ldt_entry,
    .cpu.write_gdt_entry    = native_write_gdt_entry,
    .cpu.write_idt_entry    = native_write_idt_entry,

    .cpu.alloc_ldt      = paravirt_nop,
    .cpu.free_ldt       = paravirt_nop,

    .cpu.load_sp0       = native_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
    .cpu.invalidate_io_bitmap   = native_tss_invalidate_io_bitmap,
    .cpu.update_io_bitmap       = native_tss_update_io_bitmap,
#endif

    .cpu.start_context_switch   = paravirt_nop,
    .cpu.end_context_switch     = paravirt_nop,

    /* Irq ops. */
    .irq.save_fl        = __PV_IS_CALLEE_SAVE(native_save_fl),
    .irq.irq_disable    = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
    .irq.irq_enable     = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
    .irq.safe_halt      = native_safe_halt,
    .irq.halt       = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

    /* Mmu ops. */
    .mmu.flush_tlb_user = native_flush_tlb_local,
    .mmu.flush_tlb_kernel   = native_flush_tlb_global,
    .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
    .mmu.flush_tlb_multi    = native_flush_tlb_multi,
    .mmu.tlb_remove_table   =
            (void (*)(struct mmu_gather *, void *))tlb_remove_page,

    .mmu.exit_mmap      = paravirt_nop,
    .mmu.notify_page_enc_status_changed = paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
    .mmu.read_cr2       = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
    .mmu.write_cr2      = pv_native_write_cr2,
    .mmu.read_cr3       = __native_read_cr3,
    .mmu.write_cr3      = native_write_cr3,

    .mmu.pgd_alloc      = __paravirt_pgd_alloc,
    .mmu.pgd_free       = paravirt_nop,

    .mmu.alloc_pte      = paravirt_nop,
    .mmu.alloc_pmd      = paravirt_nop,
    .mmu.alloc_pud      = paravirt_nop,
    .mmu.alloc_p4d      = paravirt_nop,
    .mmu.release_pte    = paravirt_nop,
    .mmu.release_pmd    = paravirt_nop,
    .mmu.release_pud    = paravirt_nop,
    .mmu.release_p4d    = paravirt_nop,

    .mmu.set_pte        = native_set_pte,
    .mmu.set_pmd        = native_set_pmd,

    .mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
    .mmu.ptep_modify_prot_commit    = __ptep_modify_prot_commit,

    .mmu.set_pud        = native_set_pud,

    .mmu.pmd_val        = PTE_IDENT,
    .mmu.make_pmd       = PTE_IDENT,

    .mmu.pud_val        = PTE_IDENT,
    .mmu.make_pud       = PTE_IDENT,

    .mmu.set_p4d        = native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
    .mmu.p4d_val        = PTE_IDENT,
    .mmu.make_p4d       = PTE_IDENT,

    .mmu.set_pgd        = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

    .mmu.pte_val        = PTE_IDENT,
    .mmu.pgd_val        = PTE_IDENT,

    .mmu.make_pte       = PTE_IDENT,
    .mmu.make_pgd       = PTE_IDENT,

    .mmu.dup_mmap       = paravirt_nop,
    .mmu.activate_mm    = paravirt_nop,

    .mmu.lazy_mode = {
        .enter      = paravirt_nop,
        .leave      = paravirt_nop,
        .flush      = paravirt_nop,
    },

    .mmu.set_fixmap     = native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
    /* Lock ops. */
#ifdef CONFIG_SMP
    .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
    .lock.queued_spin_unlock    =
                PV_CALLEE_SAVE(__native_queued_spin_unlock),
    .lock.wait          = paravirt_nop,
    .lock.kick          = paravirt_nop,
    .lock.vcpu_is_preempted     =
                PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};

#ifdef CONFIG_PARAVIRT_XXL
NOKPROBE_SYMBOL(native_load_idt);
#endif

EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);