Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/acpi.h               1
-rw-r--r--  arch/x86/include/asm/desc.h              57
-rw-r--r--  arch/x86/include/asm/fpu-internal.h      10
-rw-r--r--  arch/x86/include/asm/hw_irq.h             3
-rw-r--r--  arch/x86/include/asm/mpspec.h             2
-rw-r--r--  arch/x86/include/asm/msr.h               22
-rw-r--r--  arch/x86/include/asm/percpu.h             8
-rw-r--r--  arch/x86/include/asm/processor.h          9
-rw-r--r--  arch/x86/include/asm/prom.h               5
-rw-r--r--  arch/x86/include/asm/segment.h            3
-rw-r--r--  arch/x86/include/asm/trace/exceptions.h  52
-rw-r--r--  arch/x86/include/asm/traps.h             20
-rw-r--r--  arch/x86/include/asm/x86_init.h           3
13 files changed, 143 insertions, 52 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index b1977bad5435..c8c1e700c26e 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -26,6 +26,7 @@
 #include <acpi/pdc_intel.h>
 
 #include <asm/numa.h>
+#include <asm/fixmap.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index b90e5dfeee46..50d033a8947d 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -327,10 +327,25 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
 {
 	write_idt_entry(trace_idt_table, entry, gate);
 }
+
+static inline void _trace_set_gate(int gate, unsigned type, void *addr,
+				   unsigned dpl, unsigned ist, unsigned seg)
+{
+	gate_desc s;
+
+	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
+	/*
+	 * does not need to be atomic because it is only done once at
+	 * setup time
+	 */
+	write_trace_idt_entry(gate, &s);
+}
 #else
 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
 {
 }
+
+#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
 #endif
 
 static inline void _set_gate(int gate, unsigned type, void *addr,
@@ -353,11 +368,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
  * Pentium F0 0F bugfix can have resulted in the mapped
  * IDT being write-protected.
  */
-static inline void set_intr_gate(unsigned int n, void *addr)
-{
-	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
-}
+#define set_intr_gate(n, addr)						\
+	do {								\
+		BUG_ON((unsigned)n > 0xFF);				\
+		_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0,	\
+			  __KERNEL_CS);					\
+		_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
+				0, 0, __KERNEL_CS);			\
+	} while (0)
 
 extern int first_system_vector;
 /* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
@@ -374,37 +392,10 @@ static inline void alloc_system_vector(int vector)
 	}
 }
 
-#ifdef CONFIG_TRACING
-static inline void trace_set_intr_gate(unsigned int gate, void *addr)
-{
-	gate_desc s;
-
-	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
-	write_idt_entry(trace_idt_table, gate, &s);
-}
-
-static inline void __trace_alloc_intr_gate(unsigned int n, void *addr)
-{
-	trace_set_intr_gate(n, addr);
-}
-#else
-static inline void trace_set_intr_gate(unsigned int gate, void *addr)
-{
-}
-
-#define __trace_alloc_intr_gate(n, addr)
-#endif
-
-static inline void __alloc_intr_gate(unsigned int n, void *addr)
-{
-	set_intr_gate(n, addr);
-}
-
 #define alloc_intr_gate(n, addr)				\
 	do {							\
 		alloc_system_vector(n);				\
-		__alloc_intr_gate(n, addr);			\
-		__trace_alloc_intr_gate(n, trace_##addr);	\
+		set_intr_gate(n, addr);				\
 	} while (0)
 
 /*
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4d0bda7b11e3..c49a613c6452 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -365,7 +365,7 @@ static inline void drop_fpu(struct task_struct *tsk)
 	 * Forget coprocessor state..
 	 */
 	preempt_disable();
-	tsk->fpu_counter = 0;
+	tsk->thread.fpu_counter = 0;
 	__drop_fpu(tsk);
 	clear_used_math();
 	preempt_enable();
@@ -424,7 +424,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
-					     new->fpu_counter > 5);
+					     new->thread.fpu_counter > 5);
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
 			cpu = ~0;
@@ -433,16 +433,16 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->fpu_counter++;
+			new->thread.fpu_counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->fpu_counter = 0;
+		old->thread.fpu_counter = 0;
 		old->thread.fpu.last_cpu = ~0;
 		if (fpu.preload) {
-			new->fpu_counter++;
+			new->thread.fpu_counter++;
 			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 92b3bae08b74..cba45d99ac1a 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -187,6 +187,9 @@ extern __visible void smp_invalidate_interrupt(struct pt_regs *);
 #endif
 
 extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
+#ifdef CONFIG_TRACING
+#define trace_interrupt interrupt
+#endif
 
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 626cf70082d7..3142a94c7b4b 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
 #define default_get_smp_config x86_init_uint_noop
 #endif
 
-void generic_processor_info(int apicid, int version);
+int generic_processor_info(int apicid, int version);
 #ifdef CONFIG_ACPI
 extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
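A note on the desc.h rework above: with CONFIG_TRACING=y, a single set_intr_gate() invocation now fills both the regular IDT and trace_idt_table. Roughly, the expansion looks like this (illustrative sketch, not part of the patch; X86_TRAP_PF, page_fault and trace_page_fault are the existing trap number and handler symbols):

	set_intr_gate(X86_TRAP_PF, page_fault);

	/* expands, via trace_##addr token pasting, to approximately: */
	do {
		BUG_ON((unsigned)X86_TRAP_PF > 0xFF);
		/* entry in the regular IDT */
		_set_gate(X86_TRAP_PF, GATE_INTERRUPT, (void *)page_fault,
			  0, 0, __KERNEL_CS);
		/* matching entry in trace_idt_table; trace_page_fault is
		 * declared in the traps.h hunk further down */
		_trace_set_gate(X86_TRAP_PF, GATE_INTERRUPT,
				(void *)trace_page_fault, 0, 0, __KERNEL_CS);
	} while (0);

With CONFIG_TRACING=n, _trace_set_gate() expands to nothing, so the second IDT write disappears entirely.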
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index cb7502852acb..e139b13f2a33 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -218,10 +218,14 @@ void msrs_free(struct msr *msrs);
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 #else  /*  CONFIG_SMP  */
@@ -235,6 +239,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 	wrmsr(msr_no, l, h);
 	return 0;
 }
+static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	rdmsrl(msr_no, *q);
+	return 0;
+}
+static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	wrmsrl(msr_no, q);
+	return 0;
+}
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
 				struct msr *msrs)
 {
@@ -254,6 +268,14 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	return wrmsr_safe(msr_no, l, h);
 }
+static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	return rdmsrl_safe(msr_no, q);
+}
+static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	return wrmsrl_safe(msr_no, q);
+}
 static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
 	return rdmsr_safe_regs(regs);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b3e18f800302..94220d14d5cc 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -378,9 +378,6 @@ do { \
 #define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
 #define __this_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
 #define __this_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)
@@ -400,9 +397,6 @@ do { \
 #define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
@@ -447,7 +441,6 @@ do { \
 #define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 #define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
@@ -457,7 +450,6 @@ do { \
 #define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
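Usage sketch for the new 64-bit MSR helpers above (illustrative only; the MSR chosen is arbitrary). On CONFIG_SMP kernels the access is routed to the target CPU by the out-of-line implementations that live alongside rdmsr_on_cpu() and friends; on UP builds the inline stubs above simply access the local CPU:

	u64 epb;
	int err;

	/* read, adjust and write back IA32_ENERGY_PERF_BIAS on CPU 1 */
	err = rdmsrl_safe_on_cpu(1, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (!err)
		err = wrmsrl_safe_on_cpu(1, MSR_IA32_ENERGY_PERF_BIAS,
					 (epb & ~0xfULL) | 6);

The _safe variants return the #GP error instead of faulting, which matters when probing MSRs that may not exist on a given CPU.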
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 987c75ecc334..7b034a4057f9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -488,6 +488,15 @@ struct thread_struct {
 	unsigned long		iopl;
 	/* Max allowed port in the bitmap, in bytes: */
 	unsigned		io_bitmap_max;
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 /*
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index bade6ac3b14f..fbeb06ed0eaa 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -39,10 +39,5 @@ static inline void x86_dtb_init(void) { }
 
 extern char cmd_line[COMMAND_LINE_SIZE];
 
-#define pci_address_to_pio pci_address_to_pio
-unsigned long pci_address_to_pio(phys_addr_t addr);
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index c48a95035a77..6f1c3a8a33ab 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -214,6 +214,9 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+#ifdef CONFIG_TRACING
+#define trace_early_idt_handlers early_idt_handlers
+#endif
 
 /*
  * Load a segment. Fall back on loading the zero
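The wrap-around described by the new comment in the processor.h hunk is easiest to see next to the switch-in decision it feeds (values taken from the fpu-internal.h hunk earlier; sketch only):

	/* at every switch-in, in switch_fpu_prepare(): */
	fpu.preload = tsk_used_math(new) &&
		      (use_eager_fpu() || new->thread.fpu_counter > 5);

	/* 0..5   consecutive FPU-using switches: stay lazy, take one #NM trap
	 * 6..255: preload the FPU state eagerly, no trap
	 * the 256th increment wraps the unsigned char back to 0, so a task
	 * that only used the FPU in a burst eventually drops back to lazy */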
diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h
new file mode 100644
index 000000000000..2fbc66c7885b
--- /dev/null
+++ b/arch/x86/include/asm/trace/exceptions.h
@@ -0,0 +1,52 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_PAGE_FAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGE_FAULT_H
+
+#include <linux/tracepoint.h>
+
+extern void trace_irq_vector_regfunc(void);
+extern void trace_irq_vector_unregfunc(void);
+
+DECLARE_EVENT_CLASS(x86_exceptions,
+
+	TP_PROTO(unsigned long address, struct pt_regs *regs,
+		 unsigned long error_code),
+
+	TP_ARGS(address, regs, error_code),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long, address	)
+		__field(	unsigned long, ip	)
+		__field(	unsigned long, error_code )
+	),
+
+	TP_fast_assign(
+		__entry->address = address;
+		__entry->ip = regs->ip;
+		__entry->error_code = error_code;
+	),
+
+	TP_printk("address=%pf ip=%pf error_code=0x%lx",
+		  (void *)__entry->address, (void *)__entry->ip,
+		  __entry->error_code) );
+
+#define DEFINE_PAGE_FAULT_EVENT(name)				\
+DEFINE_EVENT_FN(x86_exceptions, name,				\
+	TP_PROTO(unsigned long address, struct pt_regs *regs,	\
+		 unsigned long error_code),			\
+	TP_ARGS(address, regs, error_code),			\
+	trace_irq_vector_regfunc,				\
+	trace_irq_vector_unregfunc);
+
+DEFINE_PAGE_FAULT_EVENT(page_fault_user);
+DEFINE_PAGE_FAULT_EVENT(page_fault_kernel);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE exceptions
+#endif /* _TRACE_PAGE_FAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 7036cb60cd87..58d66fe06b61 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -37,6 +37,23 @@ asmlinkage void machine_check(void);
 #endif /* CONFIG_X86_MCE */
 asmlinkage void simd_coprocessor_error(void);
 
+#ifdef CONFIG_TRACING
+asmlinkage void trace_page_fault(void);
+#define trace_divide_error divide_error
+#define trace_bounds bounds
+#define trace_invalid_op invalid_op
+#define trace_device_not_available device_not_available
+#define trace_coprocessor_segment_overrun coprocessor_segment_overrun
+#define trace_invalid_TSS invalid_TSS
+#define trace_segment_not_present segment_not_present
+#define trace_general_protection general_protection
+#define trace_spurious_interrupt_bug spurious_interrupt_bug
+#define trace_coprocessor_error coprocessor_error
+#define trace_alignment_check alignment_check
+#define trace_simd_coprocessor_error simd_coprocessor_error
+#define trace_async_page_fault async_page_fault
+#endif
+
 dotraplinkage void do_divide_error(struct pt_regs *, long);
 dotraplinkage void do_debug(struct pt_regs *, long);
 dotraplinkage void do_nmi(struct pt_regs *, long);
@@ -55,6 +72,9 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *);
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
+#ifdef CONFIG_TRACING
+dotraplinkage void trace_do_page_fault(struct pt_regs *, unsigned long);
+#endif
 dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
 dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
 dotraplinkage void do_alignment_check(struct pt_regs *, long);
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 828a1565ba57..0f1be11e43d2 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -172,6 +172,7 @@ struct x86_platform_ops {
 
 struct pci_dev;
 struct msi_msg;
+struct msi_desc;
 
 struct x86_msi_ops {
 	int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
@@ -182,6 +183,8 @@ struct x86_msi_ops {
 	void (*teardown_msi_irqs)(struct pci_dev *dev);
 	void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
 	int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
+	u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
+	u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
 };
 
 struct IO_APIC_route_entry;
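The page_fault_user/page_fault_kernel events defined in trace/exceptions.h are meant to be fired from the page-fault path; a minimal sketch of a call site (modeled on what the fault handler does in this series, with the helper name here purely illustrative):

	#define CREATE_TRACE_POINTS	/* instantiate the events in exactly one unit */
	#include <asm/trace/exceptions.h>

	static void trace_page_fault_entries(struct pt_regs *regs,
					     unsigned long error_code)
	{
		if (user_mode(regs))
			trace_page_fault_user(read_cr2(), regs, error_code);
		else
			trace_page_fault_kernel(read_cr2(), regs, error_code);
	}

The reg/unreg functions passed to DEFINE_EVENT_FN() give the x86 code a hook to switch to trace_idt_table only while at least one of these events is enabled, so the extra work stays off the fault path otherwise.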
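The two new x86_msi_ops hooks give platform code a veto over MSI/MSI-X mask-bit writes. A sketch of an override (names hypothetical; the pattern matches a paravirtualized backend that must not touch the hardware mask bits):

	/* claim "no bits changed" without writing the device's mask register */
	static u32 nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
	{
		return 0;
	}

	static u32 nop_msix_mask_irq(struct msi_desc *desc, u32 flag)
	{
		return 0;
	}

	static void __init platform_msi_setup(void)
	{
		x86_msi.msi_mask_irq = nop_msi_mask_irq;
		x86_msi.msix_mask_irq = nop_msix_mask_irq;
	}

The u32 return value is the resulting mask state, which the generic PCI MSI code caches in struct msi_desc.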