From f51bde6f0d66ab6a80eb1a05b6fe8d90c4960486 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 21 Dec 2012 17:03:58 +0100 Subject: x86, MCE: Retract most UAPI exports Retract back most macro definitions which went into the user-visible mce.h header. Even though those bits are mostly hardware-defined/-architectural, their naming is not. If we export them to userspace, any kernel unification/renaming/cleanup cannot be done anymore since those are effectively cast in stone. Besides, if userspace wants those definitions, they can write their own defines and go crazy. Signed-off-by: Borislav Petkov --- arch/x86/include/asm/mce.h | 84 +++++++++++++++++++++++++++++++++++++++ arch/x86/include/uapi/asm/mce.h | 87 ----------------------------------------- 2 files changed, 84 insertions(+), 87 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index ecdfee60ee4a..f4076af1f4ed 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -3,6 +3,90 @@ #include +/* + * Machine Check support for x86 + */ + +/* MCG_CAP register defines */ +#define MCG_BANKCNT_MASK 0xff /* Number of Banks */ +#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ +#define MCG_EXT_P (1ULL<<9) /* Extended registers available */ +#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ +#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ +#define MCG_EXT_CNT_SHIFT 16 +#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) +#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ + +/* MCG_STATUS register defines */ +#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ +#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ +#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ + +/* MCi_STATUS register defines */ +#define MCI_STATUS_VAL (1ULL<<63) /* valid error */ +#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ +#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ +#define MCI_STATUS_EN (1ULL<<60) /* error enabled */ +#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ +#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ +#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ +#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ +#define MCI_STATUS_AR (1ULL<<55) /* Action required */ +#define MCACOD 0xffff /* MCA Error Code */ + +/* Architecturally defined codes from SDM Vol. 
3B Chapter 15 */ +#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ +#define MCACOD_SCRUBMSK 0xfff0 +#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ +#define MCACOD_DATA 0x0134 /* Data Load */ +#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ + +/* MCi_MISC register defines */ +#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) +#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) +#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */ +#define MCI_MISC_ADDR_LINEAR 1 /* linear address */ +#define MCI_MISC_ADDR_PHYS 2 /* physical address */ +#define MCI_MISC_ADDR_MEM 3 /* memory address */ +#define MCI_MISC_ADDR_GENERIC 7 /* generic */ + +/* CTL2 register defines */ +#define MCI_CTL2_CMCI_EN (1ULL << 30) +#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL + +#define MCJ_CTX_MASK 3 +#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) +#define MCJ_CTX_RANDOM 0 /* inject context: random */ +#define MCJ_CTX_PROCESS 0x1 /* inject context: process */ +#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ +#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ +#define MCJ_EXCEPTION 0x8 /* raise as exception */ +#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ + +#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ + +/* Software defined banks */ +#define MCE_EXTENDED_BANK 128 +#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0) +#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) + +#define MCE_LOG_LEN 32 +#define MCE_LOG_SIGNATURE "MACHINECHECK" + +/* + * This structure contains all data related to the MCE log. Also + * carries a signature to make it easier to find from external + * debugging tools. Each entry is only valid when its finished flag + * is set. + */ +struct mce_log { + char signature[12]; /* "MACHINECHECK" */ + unsigned len; /* = MCE_LOG_LEN */ + unsigned next; + unsigned flags; + unsigned recordlen; /* length of struct mce */ + struct mce entry[MCE_LOG_LEN]; +}; struct mca_config { bool dont_log_ce; diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 58c829871c31..a0eab85ce7b8 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -4,66 +4,6 @@ #include #include -/* - * Machine Check support for x86 - */ - -/* MCG_CAP register defines */ -#define MCG_BANKCNT_MASK 0xff /* Number of Banks */ -#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ -#define MCG_EXT_P (1ULL<<9) /* Extended registers available */ -#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ -#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ -#define MCG_EXT_CNT_SHIFT 16 -#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) -#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ - -/* MCG_STATUS register defines */ -#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ -#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ -#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ - -/* MCi_STATUS register defines */ -#define MCI_STATUS_VAL (1ULL<<63) /* valid error */ -#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ -#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ -#define MCI_STATUS_EN (1ULL<<60) /* error enabled */ -#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ -#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. 
valid */ -#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ -#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ -#define MCI_STATUS_AR (1ULL<<55) /* Action required */ -#define MCACOD 0xffff /* MCA Error Code */ - -/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ -#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ -#define MCACOD_SCRUBMSK 0xfff0 -#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ -#define MCACOD_DATA 0x0134 /* Data Load */ -#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ - -/* MCi_MISC register defines */ -#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) -#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) -#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */ -#define MCI_MISC_ADDR_LINEAR 1 /* linear address */ -#define MCI_MISC_ADDR_PHYS 2 /* physical address */ -#define MCI_MISC_ADDR_MEM 3 /* memory address */ -#define MCI_MISC_ADDR_GENERIC 7 /* generic */ - -/* CTL2 register defines */ -#define MCI_CTL2_CMCI_EN (1ULL << 30) -#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL - -#define MCJ_CTX_MASK 3 -#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) -#define MCJ_CTX_RANDOM 0 /* inject context: random */ -#define MCJ_CTX_PROCESS 0x1 /* inject context: process */ -#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ -#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ -#define MCJ_EXCEPTION 0x8 /* raise as exception */ -#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ - /* Fields are zero when not available */ struct mce { __u64 status; @@ -87,35 +27,8 @@ struct mce { __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ }; -/* - * This structure contains all data related to the MCE log. Also - * carries a signature to make it easier to find from external - * debugging tools. Each entry is only valid when its finished flag - * is set. - */ - -#define MCE_LOG_LEN 32 - -struct mce_log { - char signature[12]; /* "MACHINECHECK" */ - unsigned len; /* = MCE_LOG_LEN */ - unsigned next; - unsigned flags; - unsigned recordlen; /* length of struct mce */ - struct mce entry[MCE_LOG_LEN]; -}; - -#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ - -#define MCE_LOG_SIGNATURE "MACHINECHECK" - #define MCE_GET_RECORD_LEN _IOR('M', 1, int) #define MCE_GET_LOG_LEN _IOR('M', 2, int) #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) -/* Software defined banks */ -#define MCE_EXTENDED_BANK 128 -#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 -#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) - #endif /* _UAPI_ASM_X86_MCE_H */ -- cgit v1.2.3 From d911e03d097bdc01363df5d81c43f69432eb785c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 29 Jan 2013 09:16:28 +0100 Subject: s390/timer: avoid overflow when programming clock comparator Since ed4f209 "s390/time: fix sched_clock() overflow" a new helper function is used to avoid overflows when converting TOD format values to nanosecond values. The kvm interrupt code formerly however only worked by accident because of an overflow. It tried to program a timer that would expire in more than ~29 years. Because of the old TOD-to-nanoseconds overflow bug the real expiry value however was much smaller, but now it isn't anymore. This however triggers yet another bug in the function that programs the clock comparator s390_next_ktime(): if the absolute "expires" value is after 2042 this will result in an overflow and the programmed value is lower than the current TOD value which immediatly triggers a clock comparator (= timer) interrupt. 
Since the timer isn't expired it will be programmed immediately again and so on... the result is a dead system. To fix this simply program the maximum possible value if an overflow is detected. Reported-by: Christian Borntraeger Tested-by: Christian Borntraeger Cc: stable@vger.kernel.org # v3.3+ Signed-off-by: Heiko Carstens --- arch/s390/kernel/time.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index a5f4f5a1d24b..0aa98db8a80d 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires, nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); do_div(nsecs, 125); S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); + /* Program the maximum value if we have an overflow (== year 2042) */ + if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) + S390_lowcore.clock_comparator = -1ULL; set_clock_comparator(S390_lowcore.clock_comparator); return 0; } -- cgit v1.2.3 From eed8812387e201e158059fe6fb908f8a662084dd Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 31 Jan 2013 09:26:06 -0600 Subject: ARM: scu: add empty scu_enable for !CONFIG_SMP Add an empty version of scu_enable for !SMP builds. This fixes compile error for highbank suspend code on !SMP builds. Signed-off-by: Rob Herring --- arch/arm/include/asm/smp_scu.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 4eb6d005ffaa..86dff32a0737 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -7,8 +7,14 @@ #ifndef __ASSEMBLER__ unsigned int scu_get_core_count(void __iomem *); -void scu_enable(void __iomem *); int scu_power_mode(void __iomem *, unsigned int); + +#ifdef CONFIG_SMP +void scu_enable(void __iomem *scu_base); +#else +static inline void scu_enable(void __iomem *scu_base) {} +#endif + #endif #endif -- cgit v1.2.3 From c7d5b93e9834f0e6cf21828a6fbf45ac440cf364 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 31 Jan 2013 13:26:29 -0600 Subject: ARM: scu: mask cluster id from cpu_logical_map With commit a0ae0240 (ARM: kernel: add device tree init map function), the cpu id value may include the cluster id and is no longer 0-3, so we need to mask it in scu_power_mode to get the local cpu number. Since we are only dealing with the cpu we are running on, the cluster id should not ever be needed. Signed-off-by: Rob Herring --- arch/arm/kernel/smp_scu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index b9f015e843d8..45eac87ed66a 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base) int scu_power_mode(void __iomem *scu_base, unsigned int mode) { unsigned int val; - int cpu = cpu_logical_map(smp_processor_id()); + int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); if (mode > 3 || mode == 1 || cpu > 3) return -EINVAL; -- cgit v1.2.3 From 63fc1370c19052fa5bec9593557431efc7ecc9fe Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 31 Jan 2013 13:28:42 -0600 Subject: ARM: highbank: mask cluster id from cpu_logical_map With commit a0ae0240 (ARM: kernel: add device tree init map function), the cpu id value may include the cluster id and is no longer 0-3, so we need to mask it now to get the right hard cpu index. 
Signed-off-by: Rob Herring --- arch/arm/mach-highbank/highbank.c | 3 ++- arch/arm/mach-highbank/sysregs.h | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 981dc1e1da51..e6c061282939 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -28,6 +28,7 @@ #include #include +#include #include #include #include @@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void) void highbank_set_cpu_jump(int cpu, void *jump_addr) { - cpu = cpu_logical_map(cpu); + cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0); writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu)); __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h index 70af9d13fcef..5995df7f2622 100644 --- a/arch/arm/mach-highbank/sysregs.h +++ b/arch/arm/mach-highbank/sysregs.h @@ -37,7 +37,7 @@ extern void __iomem *sregs_base; static inline void highbank_set_core_pwr(void) { - int cpu = cpu_logical_map(smp_processor_id()); + int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); if (scu_base_addr) scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); else @@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void) static inline void highbank_clear_core_pwr(void) { - int cpu = cpu_logical_map(smp_processor_id()); + int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); if (scu_base_addr) scu_power_mode(scu_base_addr, SCU_PM_NORMAL); else -- cgit v1.2.3 From e69ddd335e99a195bd4b184f0543a30f58805a63 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 4 Jan 2013 11:37:13 -0500 Subject: tile: remove an unused variable in copy_thread() Signed-off-by: Chris Metcalf --- arch/tile/kernel/process.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 0e5661e7d00d..caf93ae11793 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t); int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, struct task_struct *p) { - struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs(); + struct pt_regs *childregs = task_pt_regs(p); unsigned long ksp; unsigned long *callee_regs; -- cgit v1.2.3 From 99a844b7319fef4f41794efdc3ecdd74920cd756 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 31 Jan 2013 21:21:36 -0500 Subject: tile: include kernel/Kconfig.freezer in tile Kconfig Suggested by Tejun Heo to fix "allyesconfig" compile failure. Signed-off-by: Chris Metcalf --- arch/tile/Kconfig | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 875d008828b8..1bb7ad4aeff4 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -140,6 +140,8 @@ config ARCH_DEFCONFIG source "init/Kconfig" +source "kernel/Kconfig.freezer" + menu "Tilera-specific configuration" config NR_CPUS -- cgit v1.2.3 From a05d3f9fa3b496b2894c73bb8cbb49deb09b41db Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 1 Feb 2013 13:00:38 -0500 Subject: tile: provide "screen_info" when enabling VT This avoids a link-time failure when building allyesconfig. 
Signed-off-by: Chris Metcalf --- arch/tile/kernel/setup.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6a649a4462d3..d1e15f7b59c6 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; } /* Chip information */ char chip_model[64] __write_once; +#ifdef CONFIG_VT +struct screen_info screen_info; +#endif + struct pglist_data node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); -- cgit v1.2.3 From 7f04f0816848a51b761db6e7958e42f127e8264c Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 1 Feb 2013 13:01:36 -0500 Subject: tile: avoid defining INT_MASK macro in Unfortunately, this name conflicts with a different use of the name in various places through the tree, so don't provide it for the kernel. We preserve it for userspace to avoid breaking any userspace code that relies on this definition. This fixes a number of compile errors for various drivers that are enabled by "allyesconfig". Signed-off-by: Chris Metcalf --- arch/tile/include/asm/irqflags.h | 32 +-- arch/tile/include/uapi/arch/interrupts_32.h | 394 ++++++++++++++-------------- arch/tile/include/uapi/arch/interrupts_64.h | 346 ++++++++++++------------ arch/tile/kernel/stack.c | 2 +- 4 files changed, 383 insertions(+), 391 deletions(-) (limited to 'arch') diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index b4e96fef2cf8..241c0bb60b12 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -18,32 +18,20 @@ #include #include -#if !defined(__tilegx__) && defined(__ASSEMBLY__) - /* * The set of interrupts we want to allow when interrupts are nominally * disabled. The remainder are effectively "NMI" interrupts from * the point of view of the generic Linux code. Note that synchronous * interrupts (aka "non-queued") are not blocked by the mask in any case. */ -#if CHIP_HAS_AUX_PERF_COUNTERS() -#define LINUX_MASKABLE_INTERRUPTS_HI \ - (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT))) -#else -#define LINUX_MASKABLE_INTERRUPTS_HI \ - (~(INT_MASK_HI(INT_PERF_COUNT))) -#endif - -#else - -#if CHIP_HAS_AUX_PERF_COUNTERS() -#define LINUX_MASKABLE_INTERRUPTS \ - (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) -#else #define LINUX_MASKABLE_INTERRUPTS \ - (~(INT_MASK(INT_PERF_COUNT))) -#endif + (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT))) +#if CHIP_HAS_SPLIT_INTR_MASK() +/* The same macro, but for the two 32-bit SPRs separately. */ +#define LINUX_MASKABLE_INTERRUPTS_LO (-1) +#define LINUX_MASKABLE_INTERRUPTS_HI \ + (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32)))) #endif #ifndef __ASSEMBLY__ @@ -126,7 +114,7 @@ * to know our current state. */ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); -#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) +#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR) /* Disable interrupts. */ #define arch_local_irq_disable() \ @@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); /* Prevent the given interrupt from being enabled next time we enable irqs. 
*/ #define arch_local_irq_mask(interrupt) \ - (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) + (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt))) /* Prevent the given interrupt from being enabled immediately. */ #define arch_local_irq_mask_now(interrupt) do { \ @@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); /* Allow the given interrupt to be enabled next time we enable irqs. */ #define arch_local_irq_unmask(interrupt) \ - (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) + (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt))) /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ #define arch_local_irq_unmask_now(interrupt) do { \ @@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); /* Disable interrupts. */ #define IRQ_DISABLE(tmp0, tmp1) \ { \ - movei tmp0, -1; \ + movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \ moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ }; \ { \ diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h index 96b5710505b6..2efe3f68b2d6 100644 --- a/arch/tile/include/uapi/arch/interrupts_32.h +++ b/arch/tile/include/uapi/arch/interrupts_32.h @@ -15,6 +15,7 @@ #ifndef __ARCH_INTERRUPTS_H__ #define __ARCH_INTERRUPTS_H__ +#ifndef __KERNEL__ /** Mask for an interrupt. */ /* Note: must handle breaking interrupts into high and low words manually. */ #define INT_MASK_LO(intno) (1 << (intno)) @@ -23,6 +24,7 @@ #ifndef __ASSEMBLER__ #define INT_MASK(intno) (1ULL << (intno)) #endif +#endif /** Where a given interrupt executes */ @@ -92,216 +94,216 @@ #ifndef __ASSEMBLER__ #define QUEUED_INTERRUPTS ( \ - INT_MASK(INT_MEM_ERROR) | \ - INT_MASK(INT_DMATLB_MISS) | \ - INT_MASK(INT_DMATLB_ACCESS) | \ - INT_MASK(INT_SNITLB_MISS) | \ - INT_MASK(INT_SN_NOTIFY) | \ - INT_MASK(INT_SN_FIREWALL) | \ - INT_MASK(INT_IDN_FIREWALL) | \ - INT_MASK(INT_UDN_FIREWALL) | \ - INT_MASK(INT_TILE_TIMER) | \ - INT_MASK(INT_IDN_TIMER) | \ - INT_MASK(INT_UDN_TIMER) | \ - INT_MASK(INT_DMA_NOTIFY) | \ - INT_MASK(INT_IDN_CA) | \ - INT_MASK(INT_UDN_CA) | \ - INT_MASK(INT_IDN_AVAIL) | \ - INT_MASK(INT_UDN_AVAIL) | \ - INT_MASK(INT_PERF_COUNT) | \ - INT_MASK(INT_INTCTRL_3) | \ - INT_MASK(INT_INTCTRL_2) | \ - INT_MASK(INT_INTCTRL_1) | \ - INT_MASK(INT_INTCTRL_0) | \ - INT_MASK(INT_BOOT_ACCESS) | \ - INT_MASK(INT_WORLD_ACCESS) | \ - INT_MASK(INT_I_ASID) | \ - INT_MASK(INT_D_ASID) | \ - INT_MASK(INT_DMA_ASID) | \ - INT_MASK(INT_SNI_ASID) | \ - INT_MASK(INT_DMA_CPL) | \ - INT_MASK(INT_SN_CPL) | \ - INT_MASK(INT_DOUBLE_FAULT) | \ - INT_MASK(INT_AUX_PERF_COUNT) | \ + (1ULL << INT_MEM_ERROR) | \ + (1ULL << INT_DMATLB_MISS) | \ + (1ULL << INT_DMATLB_ACCESS) | \ + (1ULL << INT_SNITLB_MISS) | \ + (1ULL << INT_SN_NOTIFY) | \ + (1ULL << INT_SN_FIREWALL) | \ + (1ULL << INT_IDN_FIREWALL) | \ + (1ULL << INT_UDN_FIREWALL) | \ + (1ULL << INT_TILE_TIMER) | \ + (1ULL << INT_IDN_TIMER) | \ + (1ULL << INT_UDN_TIMER) | \ + (1ULL << INT_DMA_NOTIFY) | \ + (1ULL << INT_IDN_CA) | \ + (1ULL << INT_UDN_CA) | \ + (1ULL << INT_IDN_AVAIL) | \ + (1ULL << INT_UDN_AVAIL) | \ + (1ULL << INT_PERF_COUNT) | \ + (1ULL << INT_INTCTRL_3) | \ + (1ULL << INT_INTCTRL_2) | \ + (1ULL << INT_INTCTRL_1) | \ + (1ULL << INT_INTCTRL_0) | \ + (1ULL << INT_BOOT_ACCESS) | \ + (1ULL << INT_WORLD_ACCESS) | \ + (1ULL << INT_I_ASID) | \ + (1ULL << INT_D_ASID) | \ + (1ULL << INT_DMA_ASID) | \ + (1ULL << INT_SNI_ASID) | \ + (1ULL << INT_DMA_CPL) | \ + (1ULL << INT_SN_CPL) | \ + 
(1ULL << INT_DOUBLE_FAULT) | \ + (1ULL << INT_AUX_PERF_COUNT) | \ 0) #define NONQUEUED_INTERRUPTS ( \ - INT_MASK(INT_ITLB_MISS) | \ - INT_MASK(INT_ILL) | \ - INT_MASK(INT_GPV) | \ - INT_MASK(INT_SN_ACCESS) | \ - INT_MASK(INT_IDN_ACCESS) | \ - INT_MASK(INT_UDN_ACCESS) | \ - INT_MASK(INT_IDN_REFILL) | \ - INT_MASK(INT_UDN_REFILL) | \ - INT_MASK(INT_IDN_COMPLETE) | \ - INT_MASK(INT_UDN_COMPLETE) | \ - INT_MASK(INT_SWINT_3) | \ - INT_MASK(INT_SWINT_2) | \ - INT_MASK(INT_SWINT_1) | \ - INT_MASK(INT_SWINT_0) | \ - INT_MASK(INT_UNALIGN_DATA) | \ - INT_MASK(INT_DTLB_MISS) | \ - INT_MASK(INT_DTLB_ACCESS) | \ - INT_MASK(INT_SN_STATIC_ACCESS) | \ + (1ULL << INT_ITLB_MISS) | \ + (1ULL << INT_ILL) | \ + (1ULL << INT_GPV) | \ + (1ULL << INT_SN_ACCESS) | \ + (1ULL << INT_IDN_ACCESS) | \ + (1ULL << INT_UDN_ACCESS) | \ + (1ULL << INT_IDN_REFILL) | \ + (1ULL << INT_UDN_REFILL) | \ + (1ULL << INT_IDN_COMPLETE) | \ + (1ULL << INT_UDN_COMPLETE) | \ + (1ULL << INT_SWINT_3) | \ + (1ULL << INT_SWINT_2) | \ + (1ULL << INT_SWINT_1) | \ + (1ULL << INT_SWINT_0) | \ + (1ULL << INT_UNALIGN_DATA) | \ + (1ULL << INT_DTLB_MISS) | \ + (1ULL << INT_DTLB_ACCESS) | \ + (1ULL << INT_SN_STATIC_ACCESS) | \ 0) #define CRITICAL_MASKED_INTERRUPTS ( \ - INT_MASK(INT_MEM_ERROR) | \ - INT_MASK(INT_DMATLB_MISS) | \ - INT_MASK(INT_DMATLB_ACCESS) | \ - INT_MASK(INT_SNITLB_MISS) | \ - INT_MASK(INT_SN_NOTIFY) | \ - INT_MASK(INT_SN_FIREWALL) | \ - INT_MASK(INT_IDN_FIREWALL) | \ - INT_MASK(INT_UDN_FIREWALL) | \ - INT_MASK(INT_TILE_TIMER) | \ - INT_MASK(INT_IDN_TIMER) | \ - INT_MASK(INT_UDN_TIMER) | \ - INT_MASK(INT_DMA_NOTIFY) | \ - INT_MASK(INT_IDN_CA) | \ - INT_MASK(INT_UDN_CA) | \ - INT_MASK(INT_IDN_AVAIL) | \ - INT_MASK(INT_UDN_AVAIL) | \ - INT_MASK(INT_PERF_COUNT) | \ - INT_MASK(INT_INTCTRL_3) | \ - INT_MASK(INT_INTCTRL_2) | \ - INT_MASK(INT_INTCTRL_1) | \ - INT_MASK(INT_INTCTRL_0) | \ - INT_MASK(INT_AUX_PERF_COUNT) | \ + (1ULL << INT_MEM_ERROR) | \ + (1ULL << INT_DMATLB_MISS) | \ + (1ULL << INT_DMATLB_ACCESS) | \ + (1ULL << INT_SNITLB_MISS) | \ + (1ULL << INT_SN_NOTIFY) | \ + (1ULL << INT_SN_FIREWALL) | \ + (1ULL << INT_IDN_FIREWALL) | \ + (1ULL << INT_UDN_FIREWALL) | \ + (1ULL << INT_TILE_TIMER) | \ + (1ULL << INT_IDN_TIMER) | \ + (1ULL << INT_UDN_TIMER) | \ + (1ULL << INT_DMA_NOTIFY) | \ + (1ULL << INT_IDN_CA) | \ + (1ULL << INT_UDN_CA) | \ + (1ULL << INT_IDN_AVAIL) | \ + (1ULL << INT_UDN_AVAIL) | \ + (1ULL << INT_PERF_COUNT) | \ + (1ULL << INT_INTCTRL_3) | \ + (1ULL << INT_INTCTRL_2) | \ + (1ULL << INT_INTCTRL_1) | \ + (1ULL << INT_INTCTRL_0) | \ + (1ULL << INT_AUX_PERF_COUNT) | \ 0) #define CRITICAL_UNMASKED_INTERRUPTS ( \ - INT_MASK(INT_ITLB_MISS) | \ - INT_MASK(INT_ILL) | \ - INT_MASK(INT_GPV) | \ - INT_MASK(INT_SN_ACCESS) | \ - INT_MASK(INT_IDN_ACCESS) | \ - INT_MASK(INT_UDN_ACCESS) | \ - INT_MASK(INT_IDN_REFILL) | \ - INT_MASK(INT_UDN_REFILL) | \ - INT_MASK(INT_IDN_COMPLETE) | \ - INT_MASK(INT_UDN_COMPLETE) | \ - INT_MASK(INT_SWINT_3) | \ - INT_MASK(INT_SWINT_2) | \ - INT_MASK(INT_SWINT_1) | \ - INT_MASK(INT_SWINT_0) | \ - INT_MASK(INT_UNALIGN_DATA) | \ - INT_MASK(INT_DTLB_MISS) | \ - INT_MASK(INT_DTLB_ACCESS) | \ - INT_MASK(INT_BOOT_ACCESS) | \ - INT_MASK(INT_WORLD_ACCESS) | \ - INT_MASK(INT_I_ASID) | \ - INT_MASK(INT_D_ASID) | \ - INT_MASK(INT_DMA_ASID) | \ - INT_MASK(INT_SNI_ASID) | \ - INT_MASK(INT_DMA_CPL) | \ - INT_MASK(INT_SN_CPL) | \ - INT_MASK(INT_DOUBLE_FAULT) | \ - INT_MASK(INT_SN_STATIC_ACCESS) | \ + (1ULL << INT_ITLB_MISS) | \ + (1ULL << INT_ILL) | \ + (1ULL << INT_GPV) | \ + (1ULL << INT_SN_ACCESS) | \ + (1ULL << 
INT_IDN_ACCESS) | \ + (1ULL << INT_UDN_ACCESS) | \ + (1ULL << INT_IDN_REFILL) | \ + (1ULL << INT_UDN_REFILL) | \ + (1ULL << INT_IDN_COMPLETE) | \ + (1ULL << INT_UDN_COMPLETE) | \ + (1ULL << INT_SWINT_3) | \ + (1ULL << INT_SWINT_2) | \ + (1ULL << INT_SWINT_1) | \ + (1ULL << INT_SWINT_0) | \ + (1ULL << INT_UNALIGN_DATA) | \ + (1ULL << INT_DTLB_MISS) | \ + (1ULL << INT_DTLB_ACCESS) | \ + (1ULL << INT_BOOT_ACCESS) | \ + (1ULL << INT_WORLD_ACCESS) | \ + (1ULL << INT_I_ASID) | \ + (1ULL << INT_D_ASID) | \ + (1ULL << INT_DMA_ASID) | \ + (1ULL << INT_SNI_ASID) | \ + (1ULL << INT_DMA_CPL) | \ + (1ULL << INT_SN_CPL) | \ + (1ULL << INT_DOUBLE_FAULT) | \ + (1ULL << INT_SN_STATIC_ACCESS) | \ 0) #define MASKABLE_INTERRUPTS ( \ - INT_MASK(INT_MEM_ERROR) | \ - INT_MASK(INT_IDN_REFILL) | \ - INT_MASK(INT_UDN_REFILL) | \ - INT_MASK(INT_IDN_COMPLETE) | \ - INT_MASK(INT_UDN_COMPLETE) | \ - INT_MASK(INT_DMATLB_MISS) | \ - INT_MASK(INT_DMATLB_ACCESS) | \ - INT_MASK(INT_SNITLB_MISS) | \ - INT_MASK(INT_SN_NOTIFY) | \ - INT_MASK(INT_SN_FIREWALL) | \ - INT_MASK(INT_IDN_FIREWALL) | \ - INT_MASK(INT_UDN_FIREWALL) | \ - INT_MASK(INT_TILE_TIMER) | \ - INT_MASK(INT_IDN_TIMER) | \ - INT_MASK(INT_UDN_TIMER) | \ - INT_MASK(INT_DMA_NOTIFY) | \ - INT_MASK(INT_IDN_CA) | \ - INT_MASK(INT_UDN_CA) | \ - INT_MASK(INT_IDN_AVAIL) | \ - INT_MASK(INT_UDN_AVAIL) | \ - INT_MASK(INT_PERF_COUNT) | \ - INT_MASK(INT_INTCTRL_3) | \ - INT_MASK(INT_INTCTRL_2) | \ - INT_MASK(INT_INTCTRL_1) | \ - INT_MASK(INT_INTCTRL_0) | \ - INT_MASK(INT_AUX_PERF_COUNT) | \ + (1ULL << INT_MEM_ERROR) | \ + (1ULL << INT_IDN_REFILL) | \ + (1ULL << INT_UDN_REFILL) | \ + (1ULL << INT_IDN_COMPLETE) | \ + (1ULL << INT_UDN_COMPLETE) | \ + (1ULL << INT_DMATLB_MISS) | \ + (1ULL << INT_DMATLB_ACCESS) | \ + (1ULL << INT_SNITLB_MISS) | \ + (1ULL << INT_SN_NOTIFY) | \ + (1ULL << INT_SN_FIREWALL) | \ + (1ULL << INT_IDN_FIREWALL) | \ + (1ULL << INT_UDN_FIREWALL) | \ + (1ULL << INT_TILE_TIMER) | \ + (1ULL << INT_IDN_TIMER) | \ + (1ULL << INT_UDN_TIMER) | \ + (1ULL << INT_DMA_NOTIFY) | \ + (1ULL << INT_IDN_CA) | \ + (1ULL << INT_UDN_CA) | \ + (1ULL << INT_IDN_AVAIL) | \ + (1ULL << INT_UDN_AVAIL) | \ + (1ULL << INT_PERF_COUNT) | \ + (1ULL << INT_INTCTRL_3) | \ + (1ULL << INT_INTCTRL_2) | \ + (1ULL << INT_INTCTRL_1) | \ + (1ULL << INT_INTCTRL_0) | \ + (1ULL << INT_AUX_PERF_COUNT) | \ 0) #define UNMASKABLE_INTERRUPTS ( \ - INT_MASK(INT_ITLB_MISS) | \ - INT_MASK(INT_ILL) | \ - INT_MASK(INT_GPV) | \ - INT_MASK(INT_SN_ACCESS) | \ - INT_MASK(INT_IDN_ACCESS) | \ - INT_MASK(INT_UDN_ACCESS) | \ - INT_MASK(INT_SWINT_3) | \ - INT_MASK(INT_SWINT_2) | \ - INT_MASK(INT_SWINT_1) | \ - INT_MASK(INT_SWINT_0) | \ - INT_MASK(INT_UNALIGN_DATA) | \ - INT_MASK(INT_DTLB_MISS) | \ - INT_MASK(INT_DTLB_ACCESS) | \ - INT_MASK(INT_BOOT_ACCESS) | \ - INT_MASK(INT_WORLD_ACCESS) | \ - INT_MASK(INT_I_ASID) | \ - INT_MASK(INT_D_ASID) | \ - INT_MASK(INT_DMA_ASID) | \ - INT_MASK(INT_SNI_ASID) | \ - INT_MASK(INT_DMA_CPL) | \ - INT_MASK(INT_SN_CPL) | \ - INT_MASK(INT_DOUBLE_FAULT) | \ - INT_MASK(INT_SN_STATIC_ACCESS) | \ + (1ULL << INT_ITLB_MISS) | \ + (1ULL << INT_ILL) | \ + (1ULL << INT_GPV) | \ + (1ULL << INT_SN_ACCESS) | \ + (1ULL << INT_IDN_ACCESS) | \ + (1ULL << INT_UDN_ACCESS) | \ + (1ULL << INT_SWINT_3) | \ + (1ULL << INT_SWINT_2) | \ + (1ULL << INT_SWINT_1) | \ + (1ULL << INT_SWINT_0) | \ + (1ULL << INT_UNALIGN_DATA) | \ + (1ULL << INT_DTLB_MISS) | \ + (1ULL << INT_DTLB_ACCESS) | \ + (1ULL << INT_BOOT_ACCESS) | \ + (1ULL << INT_WORLD_ACCESS) | \ + (1ULL << INT_I_ASID) | \ + (1ULL << INT_D_ASID) | \ + (1ULL 
<< INT_DMA_ASID) | \ + (1ULL << INT_SNI_ASID) | \ + (1ULL << INT_DMA_CPL) | \ + (1ULL << INT_SN_CPL) | \ + (1ULL << INT_DOUBLE_FAULT) | \ + (1ULL << INT_SN_STATIC_ACCESS) | \ 0) #define SYNC_INTERRUPTS ( \ - INT_MASK(INT_ITLB_MISS) | \ - INT_MASK(INT_ILL) | \ - INT_MASK(INT_GPV) | \ - INT_MASK(INT_SN_ACCESS) | \ - INT_MASK(INT_IDN_ACCESS) | \ - INT_MASK(INT_UDN_ACCESS) | \ - INT_MASK(INT_IDN_REFILL) | \ - INT_MASK(INT_UDN_REFILL) | \ - INT_MASK(INT_IDN_COMPLETE) | \ - INT_MASK(INT_UDN_COMPLETE) | \ - INT_MASK(INT_SWINT_3) | \ - INT_MASK(INT_SWINT_2) | \ - INT_MASK(INT_SWINT_1) | \ - INT_MASK(INT_SWINT_0) | \ - INT_MASK(INT_UNALIGN_DATA) | \ - INT_MASK(INT_DTLB_MISS) | \ - INT_MASK(INT_DTLB_ACCESS) | \ - INT_MASK(INT_SN_STATIC_ACCESS) | \ + (1ULL << INT_ITLB_MISS) | \ + (1ULL << INT_ILL) | \ + (1ULL << INT_GPV) | \ + (1ULL << INT_SN_ACCESS) | \ + (1ULL << INT_IDN_ACCESS) | \ + (1ULL << INT_UDN_ACCESS) | \ + (1ULL << INT_IDN_REFILL) | \ + (1ULL << INT_UDN_REFILL) | \ + (1ULL << INT_IDN_COMPLETE) | \ + (1ULL << INT_UDN_COMPLETE) | \ + (1ULL << INT_SWINT_3) | \ + (1ULL << INT_SWINT_2) | \ + (1ULL << INT_SWINT_1) | \ + (1ULL << INT_SWINT_0) | \ + (1ULL << INT_UNALIGN_DATA) | \ + (1ULL << INT_DTLB_MISS) | \ + (1ULL << INT_DTLB_ACCESS) | \ + (1ULL << INT_SN_STATIC_ACCESS) | \ 0) #define NON_SYNC_INTERRUPTS ( \ - INT_MASK(INT_MEM_ERROR) | \ - INT_MASK(INT_DMATLB_MISS) | \ - INT_MASK(INT_DMATLB_ACCESS) | \ - INT_MASK(INT_SNITLB_MISS) | \ - INT_MASK(INT_SN_NOTIFY) | \ - INT_MASK(INT_SN_FIREWALL) | \ - INT_MASK(INT_IDN_FIREWALL) | \ - INT_MASK(INT_UDN_FIREWALL) | \ - INT_MASK(INT_TILE_TIMER) | \ - INT_MASK(INT_IDN_TIMER) | \ - INT_MASK(INT_UDN_TIMER) | \ - INT_MASK(INT_DMA_NOTIFY) | \ - INT_MASK(INT_IDN_CA) | \ - INT_MASK(INT_UDN_CA) | \ - INT_MASK(INT_IDN_AVAIL) | \ - INT_MASK(INT_UDN_AVAIL) | \ - INT_MASK(INT_PERF_COUNT) | \ - INT_MASK(INT_INTCTRL_3) | \ - INT_MASK(INT_INTCTRL_2) | \ - INT_MASK(INT_INTCTRL_1) | \ - INT_MASK(INT_INTCTRL_0) | \ - INT_MASK(INT_BOOT_ACCESS) | \ - INT_MASK(INT_WORLD_ACCESS) | \ - INT_MASK(INT_I_ASID) | \ - INT_MASK(INT_D_ASID) | \ - INT_MASK(INT_DMA_ASID) | \ - INT_MASK(INT_SNI_ASID) | \ - INT_MASK(INT_DMA_CPL) | \ - INT_MASK(INT_SN_CPL) | \ - INT_MASK(INT_DOUBLE_FAULT) | \ - INT_MASK(INT_AUX_PERF_COUNT) | \ + (1ULL << INT_MEM_ERROR) | \ + (1ULL << INT_DMATLB_MISS) | \ + (1ULL << INT_DMATLB_ACCESS) | \ + (1ULL << INT_SNITLB_MISS) | \ + (1ULL << INT_SN_NOTIFY) | \ + (1ULL << INT_SN_FIREWALL) | \ + (1ULL << INT_IDN_FIREWALL) | \ + (1ULL << INT_UDN_FIREWALL) | \ + (1ULL << INT_TILE_TIMER) | \ + (1ULL << INT_IDN_TIMER) | \ + (1ULL << INT_UDN_TIMER) | \ + (1ULL << INT_DMA_NOTIFY) | \ + (1ULL << INT_IDN_CA) | \ + (1ULL << INT_UDN_CA) | \ + (1ULL << INT_IDN_AVAIL) | \ + (1ULL << INT_UDN_AVAIL) | \ + (1ULL << INT_PERF_COUNT) | \ + (1ULL << INT_INTCTRL_3) | \ + (1ULL << INT_INTCTRL_2) | \ + (1ULL << INT_INTCTRL_1) | \ + (1ULL << INT_INTCTRL_0) | \ + (1ULL << INT_BOOT_ACCESS) | \ + (1ULL << INT_WORLD_ACCESS) | \ + (1ULL << INT_I_ASID) | \ + (1ULL << INT_D_ASID) | \ + (1ULL << INT_DMA_ASID) | \ + (1ULL << INT_SNI_ASID) | \ + (1ULL << INT_DMA_CPL) | \ + (1ULL << INT_SN_CPL) | \ + (1ULL << INT_DOUBLE_FAULT) | \ + (1ULL << INT_AUX_PERF_COUNT) | \ 0) #endif /* !__ASSEMBLER__ */ #endif /* !__ARCH_INTERRUPTS_H__ */ diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h index 5bb58b2e4e6f..13c9f9182348 100644 --- a/arch/tile/include/uapi/arch/interrupts_64.h +++ b/arch/tile/include/uapi/arch/interrupts_64.h @@ -15,6 +15,7 @@ #ifndef 
__ARCH_INTERRUPTS_H__ #define __ARCH_INTERRUPTS_H__ +#ifndef __KERNEL__ /** Mask for an interrupt. */ #ifdef __ASSEMBLER__ /* Note: must handle breaking interrupts into high and low words manually. */ @@ -22,6 +23,7 @@ #else #define INT_MASK(intno) (1ULL << (intno)) #endif +#endif /** Where a given interrupt executes */ @@ -85,192 +87,192 @@ #ifndef __ASSEMBLER__ #define QUEUED_INTERRUPTS ( \ - INT_MASK(INT_MEM_ERROR) | \ - INT_MASK(INT_IDN_COMPLETE) | \ - INT_MASK(INT_UDN_COMPLETE) | \ - INT_MASK(INT_IDN_FIREWALL) | \ - INT_MASK(INT_UDN_FIREWALL) | \ - INT_MASK(INT_TILE_TIMER) | \ - INT_MASK(INT_AUX_TILE_TIMER) | \ - INT_MASK(INT_IDN_TIMER) | \ - INT_MASK(INT_UDN_TIMER) | \ - INT_MASK(INT_IDN_AVAIL) | \ - INT_MASK(INT_UDN_AVAIL) | \ - INT_MASK(INT_IPI_3) | \ - INT_MASK(INT_IPI_2) | \ - INT_MASK(INT_IPI_1) | \ - INT_MASK(INT_IPI_0) | \ - INT_MASK(INT_PERF_COUNT) | \ - INT_MASK(INT_AUX_PERF_COUNT) | \ - INT_MASK(INT_INTCTRL_3) | \ - INT_MASK(INT_INTCTRL_2) | \ - INT_MASK(INT_INTCTRL_1) | \ - INT_MASK(INT_INTCTRL_0) | \ - INT_MASK(INT_BOOT_ACCESS) | \ - INT_MASK(INT_WORLD_ACCESS) | \ - INT_MASK(INT_I_ASID) | \ - INT_MASK(INT_D_ASID) | \ - INT_MASK(INT_DOUBLE_FAULT) | \ + (1ULL << INT_MEM_ERROR) | \ + (1ULL << INT_IDN_COMPLETE) | \ + (1ULL << INT_UDN_COMPLETE) | \ + (1ULL << INT_IDN_FIREWALL) | \ + (1ULL << INT_UDN_FIREWALL) | \ + (1ULL << INT_TILE_TIMER) | \ + (1ULL << INT_AUX_TILE_TIMER) | \ + (1ULL << INT_IDN_TIMER) | \ + (1ULL << INT_UDN_TIMER) | \ + (1ULL << INT_IDN_AVAIL) | \ + (1ULL << INT_UDN_AVAIL) | \ + (1ULL << INT_IPI_3) | \ + (1ULL << INT_IPI_2) | \ + (1ULL << INT_IPI_1) | \ + (1ULL << INT_IPI_0) | \ + (1ULL << INT_PERF_COUNT) | \ + (1ULL << INT_AUX_PERF_COUNT) | \ + (1ULL << INT_INTCTRL_3) | \ + (1ULL << INT_INTCTRL_2) | \ + (1ULL << INT_INTCTRL_1) | \ + (1ULL << INT_INTCTRL_0) | \ + (1ULL << INT_BOOT_ACCESS) | \ + (1ULL << INT_WORLD_ACCESS) | \ + (1ULL << INT_I_ASID) | \ + (1ULL << INT_D_ASID) | \ + (1ULL << INT_DOUBLE_FAULT) | \ 0) #define NONQUEUED_INTERRUPTS ( \ - INT_MASK(INT_SINGLE_STEP_3) | \ - INT_MASK(INT_SINGLE_STEP_2) | \ - INT_MASK(INT_SINGLE_STEP_1) | \ - INT_MASK(INT_SINGLE_STEP_0) | \ - INT_MASK(INT_ITLB_MISS) | \ - INT_MASK(INT_ILL) | \ - INT_MASK(INT_GPV) | \ - INT_MASK(INT_IDN_ACCESS) | \ - INT_MASK(INT_UDN_ACCESS) | \ - INT_MASK(INT_SWINT_3) | \ - INT_MASK(INT_SWINT_2) | \ - INT_MASK(INT_SWINT_1) | \ - INT_MASK(INT_SWINT_0) | \ - INT_MASK(INT_ILL_TRANS) | \ - INT_MASK(INT_UNALIGN_DATA) | \ - INT_MASK(INT_DTLB_MISS) | \ - INT_MASK(INT_DTLB_ACCESS) | \ + (1ULL << INT_SINGLE_STEP_3) | \ + (1ULL << INT_SINGLE_STEP_2) | \ + (1ULL << INT_SINGLE_STEP_1) | \ + (1ULL << INT_SINGLE_STEP_0) | \ + (1ULL << INT_ITLB_MISS) | \ + (1ULL << INT_ILL) | \ + (1ULL << INT_GPV) | \ + (1ULL << INT_IDN_ACCESS) | \ + (1ULL << INT_UDN_ACCESS) | \ + (1ULL << INT_SWINT_3) | \ + (1ULL << INT_SWINT_2) | \ + (1ULL << INT_SWINT_1) | \ + (1ULL << INT_SWINT_0) | \ + (1ULL << INT_ILL_TRANS) | \ + (1ULL << INT_UNALIGN_DATA) | \ + (1ULL << INT_DTLB_MISS) | \ + (1ULL << INT_DTLB_ACCESS) | \ 0) #define CRITICAL_MASKED_INTERRUPTS ( \ - INT_MASK(INT_MEM_ERROR) | \ - INT_MASK(INT_SINGLE_STEP_3) | \ - INT_MASK(INT_SINGLE_STEP_2) | \ - INT_MASK(INT_SINGLE_STEP_1) | \ - INT_MASK(INT_SINGLE_STEP_0) | \ - INT_MASK(INT_IDN_COMPLETE) | \ - INT_MASK(INT_UDN_COMPLETE) | \ - INT_MASK(INT_IDN_FIREWALL) | \ - INT_MASK(INT_UDN_FIREWALL) | \ - INT_MASK(INT_TILE_TIMER) | \ - INT_MASK(INT_AUX_TILE_TIMER) | \ - INT_MASK(INT_IDN_TIMER) | \ - INT_MASK(INT_UDN_TIMER) | \ - INT_MASK(INT_IDN_AVAIL) | \ - 
INT_MASK(INT_UDN_AVAIL) | \ - INT_MASK(INT_IPI_3) | \ - INT_MASK(INT_IPI_2) | \ - INT_MASK(INT_IPI_1) | \ - INT_MASK(INT_IPI_0) | \ - INT_MASK(INT_PERF_COUNT) | \ - INT_MASK(INT_AUX_PERF_COUNT) | \ - INT_MASK(INT_INTCTRL_3) | \ - INT_MASK(INT_INTCTRL_2) | \ - INT_MASK(INT_INTCTRL_1) | \ - INT_MASK(INT_INTCTRL_0) | \ + (1ULL << INT_MEM_ERROR) | \ + (1ULL << INT_SINGLE_STEP_3) | \ + (1ULL << INT_SINGLE_STEP_2) | \ + (1ULL << INT_SINGLE_STEP_1) | \ + (1ULL << INT_SINGLE_STEP_0) | \ + (1ULL << INT_IDN_COMPLETE) | \ + (1ULL << INT_UDN_COMPLETE) | \ + (1ULL << INT_IDN_FIREWALL) | \ + (1ULL << INT_UDN_FIREWALL) | \ + (1ULL << INT_TILE_TIMER) | \ + (1ULL << INT_AUX_TILE_TIMER) | \ + (1ULL << INT_IDN_TIMER) | \ + (1ULL << INT_UDN_TIMER) | \ + (1ULL << INT_IDN_AVAIL) | \ + (1ULL << INT_UDN_AVAIL) | \ + (1ULL << INT_IPI_3) | \ + (1ULL << INT_IPI_2) | \ + (1ULL << INT_IPI_1) | \ + (1ULL << INT_IPI_0) | \ + (1ULL << INT_PERF_COUNT) | \ + (1ULL << INT_AUX_PERF_COUNT) | \ + (1ULL << INT_INTCTRL_3) | \ + (1ULL << INT_INTCTRL_2) | \ + (1ULL << INT_INTCTRL_1) | \ + (1ULL << INT_INTCTRL_0) | \ 0) #define CRITICAL_UNMASKED_INTERRUPTS ( \ - INT_MASK(INT_ITLB_MISS) | \ - INT_MASK(INT_ILL) | \ - INT_MASK(INT_GPV) | \ - INT_MASK(INT_IDN_ACCESS) | \ - INT_MASK(INT_UDN_ACCESS) | \ - INT_MASK(INT_SWINT_3) | \ - INT_MASK(INT_SWINT_2) | \ - INT_MASK(INT_SWINT_1) | \ - INT_MASK(INT_SWINT_0) | \ - INT_MASK(INT_ILL_TRANS) | \ - INT_MASK(INT_UNALIGN_DATA) | \ - INT_MASK(INT_DTLB_MISS) | \ - INT_MASK(INT_DTLB_ACCESS) | \ - INT_MASK(INT_BOOT_ACCESS) | \ - INT_MASK(INT_WORLD_ACCESS) | \ - INT_MASK(INT_I_ASID) | \ - INT_MASK(INT_D_ASID) | \ - INT_MASK(INT_DOUBLE_FAULT) | \ + (1ULL << INT_ITLB_MISS) | \ + (1ULL << INT_ILL) | \ + (1ULL << INT_GPV) | \ + (1ULL << INT_IDN_ACCESS) | \ + (1ULL << INT_UDN_ACCESS) | \ + (1ULL << INT_SWINT_3) | \ + (1ULL << INT_SWINT_2) | \ + (1ULL << INT_SWINT_1) | \ + (1ULL << INT_SWINT_0) | \ + (1ULL << INT_ILL_TRANS) | \ + (1ULL << INT_UNALIGN_DATA) | \ + (1ULL << INT_DTLB_MISS) | \ + (1ULL << INT_DTLB_ACCESS) | \ + (1ULL << INT_BOOT_ACCESS) | \ + (1ULL << INT_WORLD_ACCESS) | \ + (1ULL << INT_I_ASID) | \ + (1ULL << INT_D_ASID) | \ + (1ULL << INT_DOUBLE_FAULT) | \ 0) #define MASKABLE_INTERRUPTS ( \ - INT_MASK(INT_MEM_ERROR) | \ - INT_MASK(INT_SINGLE_STEP_3) | \ - INT_MASK(INT_SINGLE_STEP_2) | \ - INT_MASK(INT_SINGLE_STEP_1) | \ - INT_MASK(INT_SINGLE_STEP_0) | \ - INT_MASK(INT_IDN_COMPLETE) | \ - INT_MASK(INT_UDN_COMPLETE) | \ - INT_MASK(INT_IDN_FIREWALL) | \ - INT_MASK(INT_UDN_FIREWALL) | \ - INT_MASK(INT_TILE_TIMER) | \ - INT_MASK(INT_AUX_TILE_TIMER) | \ - INT_MASK(INT_IDN_TIMER) | \ - INT_MASK(INT_UDN_TIMER) | \ - INT_MASK(INT_IDN_AVAIL) | \ - INT_MASK(INT_UDN_AVAIL) | \ - INT_MASK(INT_IPI_3) | \ - INT_MASK(INT_IPI_2) | \ - INT_MASK(INT_IPI_1) | \ - INT_MASK(INT_IPI_0) | \ - INT_MASK(INT_PERF_COUNT) | \ - INT_MASK(INT_AUX_PERF_COUNT) | \ - INT_MASK(INT_INTCTRL_3) | \ - INT_MASK(INT_INTCTRL_2) | \ - INT_MASK(INT_INTCTRL_1) | \ - INT_MASK(INT_INTCTRL_0) | \ + (1ULL << INT_MEM_ERROR) | \ + (1ULL << INT_SINGLE_STEP_3) | \ + (1ULL << INT_SINGLE_STEP_2) | \ + (1ULL << INT_SINGLE_STEP_1) | \ + (1ULL << INT_SINGLE_STEP_0) | \ + (1ULL << INT_IDN_COMPLETE) | \ + (1ULL << INT_UDN_COMPLETE) | \ + (1ULL << INT_IDN_FIREWALL) | \ + (1ULL << INT_UDN_FIREWALL) | \ + (1ULL << INT_TILE_TIMER) | \ + (1ULL << INT_AUX_TILE_TIMER) | \ + (1ULL << INT_IDN_TIMER) | \ + (1ULL << INT_UDN_TIMER) | \ + (1ULL << INT_IDN_AVAIL) | \ + (1ULL << INT_UDN_AVAIL) | \ + (1ULL << INT_IPI_3) | \ + (1ULL << INT_IPI_2) | \ + (1ULL << 
INT_IPI_1) | \ + (1ULL << INT_IPI_0) | \ + (1ULL << INT_PERF_COUNT) | \ + (1ULL << INT_AUX_PERF_COUNT) | \ + (1ULL << INT_INTCTRL_3) | \ + (1ULL << INT_INTCTRL_2) | \ + (1ULL << INT_INTCTRL_1) | \ + (1ULL << INT_INTCTRL_0) | \ 0) #define UNMASKABLE_INTERRUPTS ( \ - INT_MASK(INT_ITLB_MISS) | \ - INT_MASK(INT_ILL) | \ - INT_MASK(INT_GPV) | \ - INT_MASK(INT_IDN_ACCESS) | \ - INT_MASK(INT_UDN_ACCESS) | \ - INT_MASK(INT_SWINT_3) | \ - INT_MASK(INT_SWINT_2) | \ - INT_MASK(INT_SWINT_1) | \ - INT_MASK(INT_SWINT_0) | \ - INT_MASK(INT_ILL_TRANS) | \ - INT_MASK(INT_UNALIGN_DATA) | \ - INT_MASK(INT_DTLB_MISS) | \ - INT_MASK(INT_DTLB_ACCESS) | \ - INT_MASK(INT_BOOT_ACCESS) | \ - INT_MASK(INT_WORLD_ACCESS) | \ - INT_MASK(INT_I_ASID) | \ - INT_MASK(INT_D_ASID) | \ - INT_MASK(INT_DOUBLE_FAULT) | \ + (1ULL << INT_ITLB_MISS) | \ + (1ULL << INT_ILL) | \ + (1ULL << INT_GPV) | \ + (1ULL << INT_IDN_ACCESS) | \ + (1ULL << INT_UDN_ACCESS) | \ + (1ULL << INT_SWINT_3) | \ + (1ULL << INT_SWINT_2) | \ + (1ULL << INT_SWINT_1) | \ + (1ULL << INT_SWINT_0) | \ + (1ULL << INT_ILL_TRANS) | \ + (1ULL << INT_UNALIGN_DATA) | \ + (1ULL << INT_DTLB_MISS) | \ + (1ULL << INT_DTLB_ACCESS) | \ + (1ULL << INT_BOOT_ACCESS) | \ + (1ULL << INT_WORLD_ACCESS) | \ + (1ULL << INT_I_ASID) | \ + (1ULL << INT_D_ASID) | \ + (1ULL << INT_DOUBLE_FAULT) | \ 0) #define SYNC_INTERRUPTS ( \ - INT_MASK(INT_SINGLE_STEP_3) | \ - INT_MASK(INT_SINGLE_STEP_2) | \ - INT_MASK(INT_SINGLE_STEP_1) | \ - INT_MASK(INT_SINGLE_STEP_0) | \ - INT_MASK(INT_IDN_COMPLETE) | \ - INT_MASK(INT_UDN_COMPLETE) | \ - INT_MASK(INT_ITLB_MISS) | \ - INT_MASK(INT_ILL) | \ - INT_MASK(INT_GPV) | \ - INT_MASK(INT_IDN_ACCESS) | \ - INT_MASK(INT_UDN_ACCESS) | \ - INT_MASK(INT_SWINT_3) | \ - INT_MASK(INT_SWINT_2) | \ - INT_MASK(INT_SWINT_1) | \ - INT_MASK(INT_SWINT_0) | \ - INT_MASK(INT_ILL_TRANS) | \ - INT_MASK(INT_UNALIGN_DATA) | \ - INT_MASK(INT_DTLB_MISS) | \ - INT_MASK(INT_DTLB_ACCESS) | \ + (1ULL << INT_SINGLE_STEP_3) | \ + (1ULL << INT_SINGLE_STEP_2) | \ + (1ULL << INT_SINGLE_STEP_1) | \ + (1ULL << INT_SINGLE_STEP_0) | \ + (1ULL << INT_IDN_COMPLETE) | \ + (1ULL << INT_UDN_COMPLETE) | \ + (1ULL << INT_ITLB_MISS) | \ + (1ULL << INT_ILL) | \ + (1ULL << INT_GPV) | \ + (1ULL << INT_IDN_ACCESS) | \ + (1ULL << INT_UDN_ACCESS) | \ + (1ULL << INT_SWINT_3) | \ + (1ULL << INT_SWINT_2) | \ + (1ULL << INT_SWINT_1) | \ + (1ULL << INT_SWINT_0) | \ + (1ULL << INT_ILL_TRANS) | \ + (1ULL << INT_UNALIGN_DATA) | \ + (1ULL << INT_DTLB_MISS) | \ + (1ULL << INT_DTLB_ACCESS) | \ 0) #define NON_SYNC_INTERRUPTS ( \ - INT_MASK(INT_MEM_ERROR) | \ - INT_MASK(INT_IDN_FIREWALL) | \ - INT_MASK(INT_UDN_FIREWALL) | \ - INT_MASK(INT_TILE_TIMER) | \ - INT_MASK(INT_AUX_TILE_TIMER) | \ - INT_MASK(INT_IDN_TIMER) | \ - INT_MASK(INT_UDN_TIMER) | \ - INT_MASK(INT_IDN_AVAIL) | \ - INT_MASK(INT_UDN_AVAIL) | \ - INT_MASK(INT_IPI_3) | \ - INT_MASK(INT_IPI_2) | \ - INT_MASK(INT_IPI_1) | \ - INT_MASK(INT_IPI_0) | \ - INT_MASK(INT_PERF_COUNT) | \ - INT_MASK(INT_AUX_PERF_COUNT) | \ - INT_MASK(INT_INTCTRL_3) | \ - INT_MASK(INT_INTCTRL_2) | \ - INT_MASK(INT_INTCTRL_1) | \ - INT_MASK(INT_INTCTRL_0) | \ - INT_MASK(INT_BOOT_ACCESS) | \ - INT_MASK(INT_WORLD_ACCESS) | \ - INT_MASK(INT_I_ASID) | \ - INT_MASK(INT_D_ASID) | \ - INT_MASK(INT_DOUBLE_FAULT) | \ + (1ULL << INT_MEM_ERROR) | \ + (1ULL << INT_IDN_FIREWALL) | \ + (1ULL << INT_UDN_FIREWALL) | \ + (1ULL << INT_TILE_TIMER) | \ + (1ULL << INT_AUX_TILE_TIMER) | \ + (1ULL << INT_IDN_TIMER) | \ + (1ULL << INT_UDN_TIMER) | \ + (1ULL << INT_IDN_AVAIL) | \ + (1ULL << INT_UDN_AVAIL) | \ + 
(1ULL << INT_IPI_3) | \ + (1ULL << INT_IPI_2) | \ + (1ULL << INT_IPI_1) | \ + (1ULL << INT_IPI_0) | \ + (1ULL << INT_PERF_COUNT) | \ + (1ULL << INT_AUX_PERF_COUNT) | \ + (1ULL << INT_INTCTRL_3) | \ + (1ULL << INT_INTCTRL_2) | \ + (1ULL << INT_INTCTRL_1) | \ + (1ULL << INT_INTCTRL_0) | \ + (1ULL << INT_BOOT_ACCESS) | \ + (1ULL << INT_WORLD_ACCESS) | \ + (1ULL << INT_I_ASID) | \ + (1ULL << INT_D_ASID) | \ + (1ULL << INT_DOUBLE_FAULT) | \ 0) #endif /* !__ASSEMBLER__ */ #endif /* !__ARCH_INTERRUPTS_H__ */ diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index b2f44c28dda6..ba2b36ce9b66 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) p->pc, p->sp, p->ex1); p = NULL; } - if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) + if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0) return p; return NULL; } -- cgit v1.2.3 From e95c3f7a09ed3996d8ccb9679f3bf38ba9b498d7 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Wed, 6 Feb 2013 11:13:23 +1000 Subject: m68knommu: fix trap on execing /bin/init As of commit fea82210 ("m68k: switch to saner kernel_execve() semantics") the non-mmu m68k targets have trapped on booting. The execing of /bin/init causes the exec path to try and return through a 0x0 return address - thus trapping or otherwise hanging or crashing. The problem isn't in the exec path as such though, but rather in the m68knommu start_thread() macro. It is trying to clear the a6 register that it assumes is part of a struct switch_stack below the thread registers on our stack. But that is not what the stack frames look like when this is run. So it ends up corrupting our call stack and zeroing out a function return address that is sitting there. The clearing of a6 was introduced many years ago in commit 7bf9a37d8d ("m68knommu: force stack alignment on ColdFire"). It used to work because the kernel init exec code path had a short cut back to the exception return code, and it didn't need to return through the calls on the stack. Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/processor.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index ae700f49e51d..b0768a657920 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h @@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs); #define start_thread(_regs, _pc, _usp) \ do { \ (_regs)->pc = (_pc); \ - ((struct switch_stack *)(_regs))[-1].a6 = 0; \ setframeformat(_regs); \ if (current->mm) \ (_regs)->d5 = current->mm->start_data; \ -- cgit v1.2.3 From e575a86fdc50d013bf3ad3aa81d9100e8e6cc60d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 7 Feb 2013 09:44:13 -0800 Subject: x86: Do not leak kernel page mapping locations Without this patch, it is trivial to determine kernel page mappings by examining the error code reported to dmesg[1]. Instead, declare the entire kernel memory space as a violation of a present page. Additionally, since show_unhandled_signals is enabled by default, switch branch hinting to the more realistic expectation, and unobfuscate the setting of the PF_PROT bit to improve readability. [1] http://vulnfactory.org/blog/2013/02/06/a-linux-memory-trick/ Reported-by: Dan Rosenberg Suggested-by: Brad Spengler Signed-off-by: Kees Cook Cc: stable@vger.kernel.org Acked-by: H. Peter Anvin Cc: Paul E. 
McKenney Cc: Frederic Weisbecker Cc: Eric W. Biederman Cc: Linus Torvalds Cc: Andrew Morton Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20130207174413.GA12485@www.outflux.net Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 027088f2f7dd..fb674fd3fc22 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, return; } #endif + /* Kernel addresses are always protection faults: */ + if (address >= TASK_SIZE) + error_code |= PF_PROT; - if (unlikely(show_unhandled_signals)) + if (likely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); - /* Kernel addresses are always protection faults: */ tsk->thread.cr2 = address; - tsk->thread.error_code = error_code | (address >= TASK_SIZE); + tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_PF; force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); -- cgit v1.2.3 From 7c63e1ee0afe0e957292463cfa111ba59193ab2f Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 1 Feb 2013 15:06:06 -0500 Subject: tile: export a handful of symbols appropriately This was shown up by running with "allmodconfig". I used EXPORT_SYMBOL() to match existing conventions in files that were already exporting symbols, or that were exported that way by other architectures, and otherwise EXPORT_SYMBOL_GPL(). Signed-off-by: Chris Metcalf --- arch/tile/kernel/reboot.c | 2 ++ arch/tile/kernel/stack.c | 1 + arch/tile/lib/cacheflush.c | 2 ++ arch/tile/lib/cpumask.c | 2 ++ arch/tile/lib/exports.c | 2 ++ arch/tile/mm/homecache.c | 1 + 6 files changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c index baa3d905fee2..d1b5c913ae72 100644 --- a/arch/tile/kernel/reboot.c +++ b/arch/tile/kernel/reboot.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -49,3 +50,4 @@ void machine_restart(char *cmd) /* No interesting distinction to be made here. */ void (*pm_power_off)(void) = NULL; +EXPORT_SYMBOL(pm_power_off); diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index ba2b36ce9b66..ed258b8ae320 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace) { save_stack_trace_tsk(NULL, trace); } +EXPORT_SYMBOL_GPL(save_stack_trace); #endif diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c index db4fb89e12d8..8f8ad814b139 100644 --- a/arch/tile/lib/cacheflush.c +++ b/arch/tile/lib/cacheflush.c @@ -12,6 +12,7 @@ * more details. */ +#include #include #include #include @@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh) __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); #endif } +EXPORT_SYMBOL_GPL(finv_buffer_remote); diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c index fdc403614d12..75947edccb26 100644 --- a/arch/tile/lib/cpumask.c +++ b/arch/tile/lib/cpumask.c @@ -16,6 +16,7 @@ #include #include #include +#include /* * Allow cropping out bits beyond the end of the array. 
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits) } while (*bp != '\0' && *bp != '\n'); return 0; } +EXPORT_SYMBOL(bitmap_parselist_crop); diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index dd5f0a33fdaf..4385cb6fa00a 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c @@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel); EXPORT_SYMBOL(hv_dev_close); EXPORT_SYMBOL(hv_sysconf); EXPORT_SYMBOL(hv_confstr); +EXPORT_SYMBOL(hv_get_rtc); +EXPORT_SYMBOL(hv_set_rtc); /* libgcc.a */ uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 5f7868dcd6d4..1ae911939a18 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home) __set_pte(ptep, pte_set_home(pteval, home)); } } +EXPORT_SYMBOL(homecache_change_page_home); struct page *homecache_alloc_pages(gfp_t gfp_mask, unsigned int order, int home) -- cgit v1.2.3 From f456da5e1305672c86e2cbe07483388a75a9bb2e Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 1 Feb 2013 15:21:41 -0500 Subject: tile: fix memcpy_*io functions for allnoconfig On tilepro without CONFIG_PCI, we can't provide inlines of these functions, as we don't have readl/writel. In addition, fix memset_io() signature to take a volatile void *. Signed-off-by: Chris Metcalf --- arch/tile/include/asm/io.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 2a9b293fece6..31672918064c 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h @@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr) #define iowrite32 writel #define iowrite64 writeq -static inline void memset_io(void *dst, int val, size_t len) +#if CHIP_HAS_MMIO() || defined(CONFIG_PCI) + +static inline void memset_io(volatile void *dst, int val, size_t len) { int x; BUG_ON((unsigned long)dst & 0x3); @@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, writel(*(u32 *)(src + x), dst + x); } +#endif + /* * The Tile architecture does not support IOPORT, even with PCI. * Unfortunately we can't yet simply not declare these methods, -- cgit v1.2.3 From 570fd501530c8816fabde9b87efd947eb442f8e9 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Fri, 1 Feb 2013 15:31:25 -0500 Subject: tile: tag some code with #ifdef CONFIG_COMPAT This allows us to disable COMPAT mode without a link error. Signed-off-by: Chris Metcalf --- arch/tile/kernel/intvec_64.S | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index 54bc9a6678e8..4ea080902654 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S @@ -1035,7 +1035,9 @@ handle_syscall: /* Ensure that the syscall number is within the legal range. 
*/ { moveli r20, hw2(sys_call_table) +#ifdef CONFIG_COMPAT blbs r30, .Lcompat_syscall +#endif } { cmpltu r21, TREG_SYSCALL_NR_NAME, r21 @@ -1093,6 +1095,7 @@ handle_syscall: j .Lresume_userspace /* jump into middle of interrupt_return */ } +#ifdef CONFIG_COMPAT .Lcompat_syscall: /* * Load the base of the compat syscall table in r20, and @@ -1117,6 +1120,7 @@ handle_syscall: { move r15, r4; addxi r4, r4, 0 } { move r16, r5; addxi r5, r5, 0 } j .Lload_syscall_pointer +#endif .Linvalid_syscall: /* Report an invalid syscall back to the user program */ -- cgit v1.2.3 From cb214ede7657db458fd0b2a25ea0b28dbf900ebc Mon Sep 17 00:00:00 2001 From: Stoney Wang Date: Thu, 7 Feb 2013 10:53:02 -0800 Subject: x86/apic: Work around boot failure on HP ProLiant DL980 G7 Server systems When a HP ProLiant DL980 G7 Server boots a regular kernel, there will be intermittent lost interrupts which could result in a hang or (in extreme cases) data loss. The reason is that this system only supports x2apic physical mode, while the kernel boots with a logical-cluster default setting. This bug can be worked around by specifying the "x2apic_phys" or "nox2apic" boot option, but we want to handle this system without requiring manual workarounds. The BIOS sets ACPI_FADT_APIC_PHYSICAL in FADT table. As all apicids are smaller than 255, BIOS need to pass the control to the OS with xapic mode, according to x2apic-spec, chapter 2.9. Current code handle x2apic when BIOS pass with xapic mode enabled: When user specifies x2apic_phys, or FADT indicates PHYSICAL: 1. During madt oem check, apic driver is set with xapic logical or xapic phys driver at first. 2. enable_IR_x2apic() will enable x2apic_mode. 3. if user specifies x2apic_phys on the boot line, x2apic_phys_probe() will install the correct x2apic phys driver and use x2apic phys mode. Otherwise it will skip the driver will let x2apic_cluster_probe to take over to install x2apic cluster driver (wrong one) even though FADT indicates PHYSICAL, because x2apic_phys_probe does not check FADT PHYSICAL. Add checking x2apic_fadt_phys in x2apic_phys_probe() to fix the problem. 
Signed-off-by: Stoney Wang [ updated the changelog and simplified the code ] Signed-off-by: Yinghai Lu Cc: stable@kernel.org Link: http://lkml.kernel.org/r/1360263182-16226-1-git-send-email-yinghai@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/x2apic_phys.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index e03a1e180e81..562a76d433c8 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg) } early_param("x2apic_phys", set_x2apic_phys_mode); -static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +static bool x2apic_fadt_phys(void) { - if (x2apic_phys) - return x2apic_enabled(); - else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && - (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && - x2apic_enabled()) { + if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && + (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { printk(KERN_DEBUG "System requires x2apic physical mode\n"); - return 1; + return true; } - else - return 0; + return false; +} + +static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys()); } static void @@ -82,7 +83,7 @@ static void init_x2apic_ldr(void) static int x2apic_phys_probe(void) { - if (x2apic_mode && x2apic_phys) + if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) return 1; return apic == &apic_x2apic_phys; -- cgit v1.2.3 From 0ee364eb316348ddf3e0dfcd986f5f13f528f821 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 11 Feb 2013 14:52:36 +0000 Subject: x86/mm: Check if PUD is large when validating a kernel address A user reported the following oops when a backup process reads /proc/kcore: BUG: unable to handle kernel paging request at ffffbb00ff33b000 IP: [] kern_addr_valid+0xbe/0x110 [...] Call Trace: [] read_kcore+0x17a/0x370 [] proc_reg_read+0x77/0xc0 [] vfs_read+0xc7/0x130 [] sys_read+0x53/0xa0 [] system_call_fastpath+0x16/0x1b Investigation determined that the bug triggered when reading system RAM at the 4G mark. On this system, that was the first address using 1G pages for the virt->phys direct mapping so the PUD is pointing to a physical address, not a PMD page. The problem is that the page table walker in kern_addr_valid() is not checking pud_large() and treats the physical address as if it was a PMD. If it happens to look like pmd_none then it'll silently fail, probably returning zeros instead of real data. If the data happens to look like a present PMD though, it will be walked resulting in the oops above. This patch adds the necessary pud_large() check. Unfortunately the problem was not readily reproducible and now they are running the backup program without accessing /proc/kcore so the patch has not been validated but I think it makes sense. 
Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Reviewed-by: Michal Hocko Acked-by: Johannes Weiner Cc: stable@vger.kernel.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20130211145236.GX21389@suse.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable.h | 5 +++++ arch/x86/mm/init_64.c | 3 +++ 2 files changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5199db2923d3..1c1a955e67c0 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd) return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; } +static inline unsigned long pud_pfn(pud_t pud) +{ + return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; +} + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) static inline int pmd_large(pmd_t pte) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 2ead3c8a4c84..75c9a6a59697 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr) if (pud_none(*pud)) return 0; + if (pud_large(*pud)) + return pfn_valid(pud_pfn(*pud)); + pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return 0; -- cgit v1.2.3 From 462738f4f04febe16bf366925559c267130c88f0 Mon Sep 17 00:00:00 2001 From: Nicolas Schichan Date: Wed, 13 Feb 2013 17:30:39 +0000 Subject: ARM: net: bpf_jit: fix emit_swap16() for non ARMv6+. The original code was generating an lsl instructions using the value of ARM_R8 (skb_headlen, possibly uninitialized if no skb_headlen access was required) as a shift amount. Signed-off-by: Nicolas Schichan Acked-by: Mircea Gherzan Acked-by: Russell King Signed-off-by: David S. Miller --- arch/arm/net/bpf_jit_32.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index a34f1e214116..6828ef6ce80e 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -341,10 +341,17 @@ static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) { - emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx); - emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx); - emit(ARM_LSL_I(r_dst, r_dst, 8), ctx); - emit(ARM_LSL_R(r_dst, r_dst, 8), ctx); + /* r_dst = (r_src << 8) | (r_src >> 8) */ + emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx); + emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx); + + /* + * we need to mask out the bits set in r_dst[23:16] due to + * the first shift instruction. + * + * note that 0x8ff is the encoded immediate 0x00ff0000. + */ + emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx); } #else /* ARMv6+ */ -- cgit v1.2.3