/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
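/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */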

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/static_call_types.h>
#include <asm/frame.h>

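/*
 * The steal_clock and sched_clock hooks are dispatched through static
 * calls rather than through pv_ops; hypervisor-specific setup code
 * retargets them at boot (sched_clock via paravirt_set_sched_clock()).
 */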
u64 dummy_steal_clock(int cpu);
u64 dummy_sched_clock(void);

DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void));

static inline u64 paravirt_sched_clock(void)
{
	return static_call(pv_sched_clock)();
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return static_call(pv_steal_clock)(cpu);
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init paravirt_set_cap(void);
#endif

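/* The paravirtualized I/O functions */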
static inline void slow_down_io(void)
{
	PVOP_VCALL0(cpu.io_delay);
#ifdef REALLY_SLOW_IO
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
#endif
}

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_multi(const struct cpumask *cpumask,
			    const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_multi(const struct cpumask *cpumask,
				     const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

static inline void notify_page_enc_status_changed(unsigned long pfn,
						  int npages, bool enc)
{
	PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

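/* The paravirtualized CPUID instruction. */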
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

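/*
 * These special macros can be used to get or set a debugging register
 */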
static __always_inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static __always_inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static __always_inline unsigned long read_cr2(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
				"mov %%cr2, %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
			      "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void write_cr3(unsigned long x)
{
	PVOP_ALT_VCALL1(mmu.write_cr3, x,
			"mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

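/* rdmsr with exception handling */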
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

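/*
 * Page table entry constructors/extractors.  Under Xen PV these
 * translate between the kernel's and the hypervisor's view of an
 * entry (e.g. pfn <-> mfn); on native hardware the alternatives
 * machinery patches them down to an identity move (mov %rdi, %rax).
 */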
static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
					  "mov %%rdi, %%rax",
					  ALT_NOT(X86_FEATURE_XENPV)) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
			       "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

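/*
 * With 5-level paging the p4d level is a real page table level and the
 * pgd accessors below must be paravirtualized as well; with 4-level
 * paging the p4d folds into the pgd and these helpers are unused.
 */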
static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
					"mov %%rdi, %%rax",
					ALT_NOT(X86_FEATURE_XENPV));

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
				"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)

#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

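/*
 * Paravirt hooks for the qspinlock slowpath: wait/kick provide a
 * hypercall-backed sleep/wake primitive, and vcpu_is_preempted lets
 * lock and scheduler code avoid busy-waiting on a preempted vCPU.
 */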
static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
			  "movb $0, (%%" _ASM_ARG1 ");",
			  ALT_NOT(X86_FEATURE_PVUNLOCK));
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
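/* save and restore all caller-save registers, except return value */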
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"
#else
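/* save and restore all caller-save registers, except return value */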
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

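/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also be useful for functions called with a
 * non-standard calling convention.
 */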
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection " section ", \"ax\";"				\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    ASM_ENDBR							\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"\
	    ".popsection")

#define PV_CALLEE_SAVE_REGS_THUNK(func)			\
	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")

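/* Get a reference to a callee-save function */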
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

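/* Promise that "func" already preserves all caller-save registers */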
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
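
/*
 * Usage sketch (hypothetical helper name): a guest that provides its
 * own vcpu_is_preempted implementation wraps it in a thunk and then
 * installs the callee-save reference:
 *
 *	__visible bool my_vcpu_is_preempted(long cpu);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_vcpu_is_preempted);
 *	...
 *	pv_ops.lock.vcpu_is_preempted =
 *		PV_CALLEE_SAVE(my_vcpu_is_preempted);
 */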

#ifdef CONFIG_PARAVIRT_XXL
static __always_inline unsigned long arch_local_save_flags(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
				ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void arch_local_irq_disable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline void arch_local_irq_enable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
}

static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif /* CONFIG_PARAVIRT_XXL */

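/* Make sure as little as possible of this mess escapes. */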
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */
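
/*
 * _PVSITE emits the native instruction sequence "ops" and records the
 * bracketed site in the .parainstructions section so that it can be
 * patched at boot.
 */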
#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)

#ifdef CONFIG_DEBUG_ENTRY
.macro PARA_IRQ_save_fl
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
		  ANNOTATE_RETPOLINE_SAFE;
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
.endm

#define SAVE_FLAGS	ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
				    ALT_NOT(X86_FEATURE_XENPV)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void paravirt_set_cap(void)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */