#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H

#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
#include <asm/fixmap.h>
#include <asm/irq_vectors.h>
#include <asm/cpu_entry_area.h>

#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/percpu.h>

static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
        desc->limit0 = info->limit & 0x0ffff;

        desc->base0 = (info->base_addr & 0x0000ffff);
        desc->base1 = (info->base_addr & 0x00ff0000) >> 16;

        desc->type = (info->read_exec_only ^ 1) << 1;
        desc->type |= info->contents << 2;
        /* Set the ACCESS bit so the descriptor can live in a read-only mapping */
        desc->type |= 1;

        desc->s = 1;
        desc->dpl = 0x3;
        desc->p = info->seg_not_present ^ 1;
        desc->limit1 = (info->limit & 0xf0000) >> 16;
        desc->avl = info->useable;
        desc->d = info->seg_32bit;
        desc->g = info->limit_in_pages;

        desc->base2 = (info->base_addr & 0xff000000) >> 24;

        /*
         * Never set the L (64-bit code segment) bit: a user_desc cannot
         * describe a 64-bit segment, so always clear it here.
         */
        desc->l = 0;
}
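
/*
 * Example (illustrative sketch, not part of the kernel API): filling a
 * descriptor from a flat 32-bit read/write user_desc. The field values
 * below are assumptions chosen for the example.
 *
 *      struct user_desc info = {
 *              .entry_number    = GDT_ENTRY_TLS_MIN,
 *              .base_addr       = 0x1000,
 *              .limit           = 0xfffff,
 *              .seg_32bit       = 1,
 *              .contents        = 0,           // data, grows up
 *              .read_exec_only  = 0,
 *              .limit_in_pages  = 1,
 *              .seg_not_present = 0,
 *              .useable         = 1,
 *      };
 *      struct desc_struct d;
 *
 *      fill_ldt(&d, &info);    // d is now a present, DPL-3 data segment
 */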

struct gdt_page {
        struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));

DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

/* Provide the original, writeable per-CPU GDT */
static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
{
        return per_cpu(gdt_page, cpu).gdt;
}

/* Provide the current CPU's writeable GDT */
static inline struct desc_struct *get_current_gdt_rw(void)
{
        return this_cpu_ptr(&gdt_page)->gdt;
}

/* Provide the read-only GDT alias in the cpu_entry_area */
static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
{
        return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
}

/* Provide the current CPU's read-only GDT */
static inline struct desc_struct *get_current_gdt_ro(void)
{
        return get_cpu_gdt_ro(smp_processor_id());
}

/* Provide the physical address of the writeable GDT page */
static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
{
        return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
}
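
/*
 * Usage sketch (illustrative): pointing GDTR at the writeable GDT for a CPU,
 * roughly what a load_direct_gdt()-style helper does. GDT_SIZE is assumed to
 * be the size of the GDT in bytes.
 *
 *      struct desc_ptr gdt_descr;
 *
 *      gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
 *      gdt_descr.size    = GDT_SIZE - 1;       // lgdt takes an inclusive limit
 *      load_gdt(&gdt_descr);
 */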

static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
                             unsigned dpl, unsigned ist, unsigned seg)
{
        gate->offset_low = (u16) func;
        gate->bits.p = 1;
        gate->bits.dpl = dpl;
        gate->bits.zero = 0;
        gate->bits.type = type;
        gate->offset_middle = (u16) (func >> 16);
#ifdef CONFIG_X86_64
        gate->segment = __KERNEL_CS;
        gate->bits.ist = ist;
        gate->reserved = 0;
        gate->offset_high = (u32) (func >> 32);
#else
        gate->segment = seg;
        gate->bits.ist = 0;
#endif
}
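
/*
 * Example (illustrative): packing a DPL-0 interrupt gate for a handler.
 * my_handler, my_idt and the vector number are hypothetical names for
 * this sketch.
 *
 *      extern void my_handler(void);
 *      gate_desc g;
 *
 *      pack_gate(&g, GATE_INTERRUPT, (unsigned long)my_handler,
 *                0, 0, __KERNEL_CS);
 *      write_idt_entry(my_idt, 0x40, &g);
 */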

static inline int desc_empty(const void *ptr)
{
        const u32 *desc = ptr;

        /* An 8-byte descriptor is empty iff both of its 32-bit halves are zero */
        return !(desc[0] | desc[1]);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define load_TR_desc()                          native_load_tr_desc()
#define load_gdt(dtr)                           native_load_gdt(dtr)
#define load_idt(dtr)                           native_load_idt(dtr)
#define load_tr(tr)                             asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt)                           asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr)                          native_store_gdt(dtr)
#define store_tr(tr)                            (tr = native_store_tr())

#define load_TLS(t, cpu)                        native_load_tls(t, cpu)
#define set_ldt                                 native_set_ldt

#define write_ldt_entry(dt, entry, desc)        native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type)  native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g)           native_write_idt_entry(dt, entry, g)

/* On native hardware, LDT allocation needs no hypervisor notification */
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif /* CONFIG_PARAVIRT_XXL */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
        memcpy(&idt[entry], gate, sizeof(*gate));
}

static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
        memcpy(&ldt[entry], desc, 8);
}

static inline void
native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
        unsigned int size;

        /* On 64-bit, TSS and LDT descriptors occupy two 8-byte GDT slots */
        switch (type) {
        case DESC_TSS:  size = sizeof(tss_desc);        break;
        case DESC_LDT:  size = sizeof(ldt_desc);        break;
        default:        size = sizeof(*gdt);            break;
        }

        memcpy(&gdt[entry], desc, size);
}

static inline void set_tssldt_descriptor(void *d, unsigned long addr,
                                         unsigned type, unsigned size)
{
        struct ldttss_desc *desc = d;

        memset(desc, 0, sizeof(*desc));

        /* Scatter the base, limit and type across the system-descriptor fields */
        desc->limit0 = (u16) size;
        desc->base0 = (u16) addr;
        desc->base1 = (addr >> 16) & 0xFF;
        desc->type = type;
        desc->p = 1;
        desc->limit1 = (size >> 16) & 0xF;
        desc->base2 = (addr >> 24) & 0xFF;
#ifdef CONFIG_X86_64
        desc->base3 = (u32) (addr >> 32);
#endif
}

static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
{
        struct desc_struct *d = get_cpu_gdt_rw(cpu);
        tss_desc tss;

        set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
                              __KERNEL_TSS_LIMIT);
        write_gdt_entry(d, entry, &tss, DESC_TSS);
}

#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)

static inline void native_set_ldt(const void *addr, unsigned int entries)
{
        if (likely(entries == 0))
                /* Loading a NULL selector disables the LDT */
                asm volatile("lldt %w0"::"q" (0));
        else {
                unsigned cpu = smp_processor_id();
                ldt_desc ldt;

                set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
                                      entries * LDT_ENTRY_SIZE - 1);
                write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT,
                                &ldt, DESC_LDT);
                asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
        }
}

static inline void native_load_gdt(const struct desc_ptr *dtr)
{
        asm volatile("lgdt %0"::"m" (*dtr));
}

static __always_inline void native_load_idt(const struct desc_ptr *dtr)
{
        asm volatile("lidt %0"::"m" (*dtr));
}

static inline void native_store_gdt(struct desc_ptr *dtr)
{
        asm volatile("sgdt %0":"=m" (*dtr));
}

static inline void store_idt(struct desc_ptr *dtr)
{
        asm volatile("sidt %0":"=m" (*dtr));
}

/* A zero address/limit makes (almost) any subsequent use of the table fault */
static inline void native_gdt_invalidate(void)
{
        const struct desc_ptr invalid_gdt = {
                .address = 0,
                .size = 0
        };

        native_load_gdt(&invalid_gdt);
}

static inline void native_idt_invalidate(void)
{
        const struct desc_ptr invalid_idt = {
                .address = 0,
                .size = 0
        };

        native_load_idt(&invalid_idt);
}

/*
 * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
 * also mapped read-only via the fixmap, so setting the busy bit through that
 * alias would fault. Temporarily switch to the writeable GDT when needed.
 */
#ifdef CONFIG_X86_64
static inline void native_load_tr_desc(void)
{
        struct desc_ptr gdt;
        int cpu = raw_smp_processor_id();
        bool restore = false;
        struct desc_struct *fixmap_gdt;

        native_store_gdt(&gdt);
        fixmap_gdt = get_cpu_gdt_ro(cpu);

        /*
         * If the current GDT is the read-only fixmap, swap to the original
         * writeable version. Swap back at the end.
         */
        if (gdt.address == (unsigned long)fixmap_gdt) {
                load_direct_gdt(cpu);
                restore = true;
        }
        asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
        if (restore)
                load_fixmap_gdt(cpu);
}
#else
static inline void native_load_tr_desc(void)
{
        asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
#endif

static inline unsigned long native_store_tr(void)
{
        unsigned long tr;

        asm volatile("str %0":"=r" (tr));

        return tr;
}

static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
        struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
        unsigned int i;

        /* Copy the task's TLS descriptors into this CPU's GDT TLS slots */
        for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
                gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
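
/*
 * Context-switch sketch (illustrative): how the TLS slots are typically
 * refreshed when switching to another task. next and cpu are assumed to be
 * provided by the scheduler.
 *
 *      // next->thread.tls_array[] was filled via fill_ldt() when the task
 *      // called set_thread_area(); propagate it into this CPU's GDT:
 *      load_TLS(&next->thread, cpu);
 */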

DECLARE_PER_CPU(bool, __tss_limit_invalid);

static inline void force_reload_TR(void)
{
        struct desc_struct *d = get_current_gdt_rw();
        tss_desc tss;

        memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc));

        /*
         * LTR requires an available TSS, and the TSS is currently
         * marked busy. Make it available again so that LTR will work.
         */
        tss.type = DESC_TSS;
        write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);

        load_TR_desc();
        this_cpu_write(__tss_limit_invalid, false);
}

/*
 * Call this if you need the TSS limit to be correct, which should be the case
 * if and only if you have TIF_IO_BITMAP set or you're switching to a task
 * with TIF_IO_BITMAP set.
 */
static inline void refresh_tss_limit(void)
{
        DEBUG_LOCKS_WARN_ON(preemptible());

        if (unlikely(this_cpu_read(__tss_limit_invalid)))
                force_reload_TR();
}

/*
 * Call this if something (e.g. a VMX exit) may have corrupted the cached TSS
 * limit. The limit only matters when an IO bitmap is in use: if the current
 * task uses one, reload TR right away; otherwise just mark the cached limit
 * stale so that refresh_tss_limit() repairs it before it is needed.
 */
static inline void invalidate_tss_limit(void)
{
        DEBUG_LOCKS_WARN_ON(preemptible());

        if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
                force_reload_TR();
        else
                this_cpu_write(__tss_limit_invalid, true);
}
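
/*
 * Usage sketch (illustrative): a hypervisor exit path that may clobber the
 * hardware TSS limit would repair the cached state like this. The exact
 * calling context is an assumption for this sketch.
 *
 *      preempt_disable();
 *      invalidate_tss_limit();         // reloads TR now only if needed
 *      preempt_enable();
 */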

/* This intentionally ignores lm, since 32-bit apps don't have that field. */
#define LDT_empty(info)                                 \
        ((info)->base_addr == 0 &&                      \
         (info)->limit == 0 &&                          \
         (info)->contents == 0 &&                       \
         (info)->read_exec_only == 1 &&                 \
         (info)->seg_32bit == 0 &&                      \
         (info)->limit_in_pages == 0 &&                 \
         (info)->seg_not_present == 1 &&                \
         (info)->useable == 0)

/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
static inline bool LDT_zero(const struct user_desc *info)
{
        return (info->base_addr == 0 &&
                info->limit == 0 &&
                info->contents == 0 &&
                info->read_exec_only == 0 &&
                info->seg_32bit == 0 &&
                info->limit_in_pages == 0 &&
                info->seg_not_present == 0 &&
                info->useable == 0);
}

static inline void clear_LDT(void)
{
        set_ldt(NULL, 0);
}

static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
        return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
        desc->base0 = base & 0xffff;
        desc->base1 = (base >> 16) & 0xff;
        desc->base2 = (base >> 24) & 0xff;
}

static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
        return desc->limit0 | (desc->limit1 << 16);
}

static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
        desc->limit0 = limit & 0xffff;
        desc->limit1 = (limit >> 16) & 0xf;
}
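
/*
 * Example (illustrative): round-tripping a base and limit through a
 * descriptor. The values are arbitrary.
 *
 *      struct desc_struct d = { };
 *
 *      set_desc_base(&d, 0x12345678);
 *      set_desc_limit(&d, 0xabcde);
 *
 *      // get_desc_base(&d)  == 0x12345678
 *      // get_desc_limit(&d) == 0xabcde    (the limit field is 20 bits wide)
 */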

void alloc_intr_gate(unsigned int n, const void *addr);

static inline void init_idt_data(struct idt_data *data, unsigned int n,
                                 const void *addr)
{
        /* IDT vectors are a single byte */
        BUG_ON(n > 0xFF);

        memset(data, 0, sizeof(*data));
        data->vector = n;
        data->addr = addr;
        data->segment = __KERNEL_CS;
        data->bits.type = GATE_INTERRUPT;
        data->bits.p = 1;
}

static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
{
        unsigned long addr = (unsigned long) d->addr;

        /* Split the handler address across the gate's offset fields */
        gate->offset_low = (u16) addr;
        gate->segment = (u16) d->segment;
        gate->bits = d->bits;
        gate->offset_middle = (u16) (addr >> 16);
#ifdef CONFIG_X86_64
        gate->offset_high = (u32) (addr >> 32);
        gate->reserved = 0;
#endif
}
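
/*
 * Example (illustrative): building a gate from an idt_data. my_handler,
 * my_idt and the vector number are hypothetical names for this sketch.
 *
 *      struct idt_data data;
 *      gate_desc g;
 *
 *      init_idt_data(&data, 0x40, my_handler); // DPL-0 interrupt gate
 *      idt_init_desc(&g, &data);
 *      write_idt_entry(my_idt, 0x40, &g);
 */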

extern unsigned long system_vectors[];

extern void load_current_idt(void);
extern void idt_setup_early_handler(void);
extern void idt_setup_early_traps(void);
extern void idt_setup_traps(void);
extern void idt_setup_apic_and_irq_gates(void);
extern bool idt_is_f00f_address(unsigned long address);

#ifdef CONFIG_X86_64
extern void idt_setup_early_pf(void);
#else
static inline void idt_setup_early_pf(void) { }
#endif

extern void idt_invalidate(void);

#endif /* _ASM_X86_DESC_H */