Diffstat (limited to 'arch')
 arch/arm/kernel/traps.c                      |   5
 arch/arm/kernel/vmlinux.lds.S                |   1
 arch/arm/mach-omap2/board-4430sdp.c          |  17
 arch/arm/mach-omap2/board-omap3evm.c         |  23
 arch/arm/mach-omap2/board-omap4panda.c       |   6
 arch/arm/mach-omap2/board-zoom-peripherals.c |   6
 arch/arm/mach-omap2/hsmmc.c                  |   8
 arch/arm/mach-omap2/mux.c                    |  22
 arch/arm/mach-omap2/omap-headsmp.S           |   1
 arch/arm/mach-omap2/omap_hwmod.c             |  16
 arch/arm/mach-omap2/prm44xx.c                |   1
 arch/arm/mach-omap2/serial.c                 |   8
 arch/arm/mach-omap2/vc.c                     |  10
 arch/arm/mach-omap2/vp.c                     |   5
 arch/arm/mm/cache-v7.S                       |   6
 arch/powerpc/configs/ppc64_defconfig         |   5
 arch/powerpc/include/asm/ppc-pci.h           |   5
 arch/powerpc/include/asm/ptrace.h            |  20
 arch/powerpc/kernel/exceptions-64s.S         |   2
 arch/powerpc/kernel/irq.c                    |   6
 arch/powerpc/kernel/perf_event.c             |   8
 arch/powerpc/kernel/process.c                |   6
 arch/powerpc/kernel/rtas.c                   |   5
 arch/powerpc/platforms/powernv/pci.c         |  22
 arch/powerpc/platforms/pseries/eeh.c         |   4
 arch/powerpc/platforms/pseries/suspend.c     |   6
 arch/powerpc/platforms/wsp/ics.c             |   2
 arch/powerpc/platforms/wsp/wsp_pci.c         |   8
 arch/powerpc/sysdev/fsl_pci.c                |  48
 arch/x86/include/asm/i387.h                  | 284
 arch/x86/include/asm/processor.h             |   1
 arch/x86/include/asm/thread_info.h           |   2
 arch/x86/kernel/cpu/perf_event_intel_ds.c    |   1
 arch/x86/kernel/cpu/perf_event_intel_lbr.c   |   2
 arch/x86/kernel/process_32.c                 |  25
 arch/x86/kernel/process_64.c                 |  29
 arch/x86/kernel/traps.c                      |  41
 arch/x86/kernel/xsave.c                      |  12
 arch/x86/kvm/vmx.c                           |   2
 arch/x86/pci/xen.c                           |   2
 arch/x86/xen/smp.c                           |   7
 41 files changed, 451 insertions(+), 239 deletions(-)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 99a572702509..f84dfe67724f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -266,6 +266,7 @@ void die(const char *str, struct pt_regs *regs, int err)
{
struct thread_info *thread = current_thread_info();
int ret;
+ enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
oops_enter();
@@ -273,7 +274,9 @@ void die(const char *str, struct pt_regs *regs, int err)
console_verbose();
bust_spinlocks(1);
if (!user_mode(regs))
- report_bug(regs->ARM_pc, regs);
+ bug_type = report_bug(regs->ARM_pc, regs);
+ if (bug_type != BUG_TRAP_TYPE_NONE)
+ str = "Oops - BUG";
ret = __die(str, err, thread, regs);
if (regs && kexec_should_crash(thread->task))
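
For reference, report_bug() (declared in include/linux/bug.h) looks the faulting PC up in the kernel's bug table and returns an enum bug_trap_type; the hunk uses any result other than BUG_TRAP_TYPE_NONE to relabel the oops. A standalone model of that pattern (the lookup stub is hypothetical, not the kernel's implementation):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's types. */
enum bug_trap_type { BUG_TRAP_TYPE_NONE, BUG_TRAP_TYPE_WARN, BUG_TRAP_TYPE_BUG };
struct pt_regs { unsigned long pc; };

/* Pretend bug-table lookup: did the faulting PC hit a BUG() site? */
static enum bug_trap_type report_bug_stub(unsigned long pc, struct pt_regs *regs)
{
	(void)regs;
	return (pc == 0xc0001234UL) ? BUG_TRAP_TYPE_BUG : BUG_TRAP_TYPE_NONE;
}

int main(void)
{
	struct pt_regs regs = { .pc = 0xc0001234UL };
	const char *str = "Oops";

	/* Same shape as the die() hunk: only relabel when the PC matched. */
	if (report_bug_stub(regs.pc, &regs) != BUG_TRAP_TYPE_NONE)
		str = "Oops - BUG";
	printf("%s at %#lx\n", str, regs.pc);
	return 0;
}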
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 1e19691e0406..43a31fb06318 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -10,6 +10,7 @@
#include <asm/page.h>
#define PROC_INFO \
+ . = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 21fc87648660..4e9071589bfb 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -814,7 +814,7 @@ static struct omap_dss_board_info sdp4430_dss_data = {
.default_device = &sdp4430_lcd_device,
};
-static void omap_4430sdp_display_init(void)
+static void __init omap_4430sdp_display_init(void)
{
int r;
@@ -851,7 +851,7 @@ static struct omap_board_mux board_mux[] __initdata = {
#define board_mux NULL
#endif
-static void omap4_sdp4430_wifi_mux_init(void)
+static void __init omap4_sdp4430_wifi_mux_init(void)
{
omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT |
OMAP_PIN_OFF_WAKEUPENABLE);
@@ -878,12 +878,17 @@ static struct wl12xx_platform_data omap4_sdp4430_wlan_data __initdata = {
.board_tcxo_clock = WL12XX_TCXOCLOCK_26,
};
-static void omap4_sdp4430_wifi_init(void)
+static void __init omap4_sdp4430_wifi_init(void)
{
+ int ret;
+
omap4_sdp4430_wifi_mux_init();
- if (wl12xx_set_platform_data(&omap4_sdp4430_wlan_data))
- pr_err("Error setting wl12xx data\n");
- platform_device_register(&omap_vwlan_device);
+ ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data);
+ if (ret)
+ pr_err("Error setting wl12xx data: %d\n", ret);
+ ret = platform_device_register(&omap_vwlan_device);
+ if (ret)
+ pr_err("Error registering wl12xx device: %d\n", ret);
}
static void __init omap_4430sdp_init(void)
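
The same pattern recurs on the boards below: capture the return code so the log line says why the call failed, and stop ignoring platform_device_register()'s result. A standalone sketch of the idiom (the two stubs stand in for wl12xx_set_platform_data() and platform_device_register()):

#include <stdio.h>

#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

/* Stand-ins for the registration calls; -16 models -EBUSY. */
static int set_platform_data_stub(void) { return -16; }
static int device_register_stub(void)   { return 0; }

static void wifi_init_sketch(void)
{
	int ret;

	ret = set_platform_data_stub();
	if (ret)
		pr_err("Error setting wl12xx data: %d\n", ret);
	ret = device_register_stub();
	if (ret)
		pr_err("Error registering wl12xx device: %d\n", ret);
}

int main(void) { wifi_init_sketch(); return 0; }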
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 003fe34c9343..c775bead1497 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -617,6 +617,21 @@ static struct gpio omap3_evm_ehci_gpios[] __initdata = {
{ OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" },
};
+static void __init omap3_evm_wl12xx_init(void)
+{
+#ifdef CONFIG_WL12XX_PLATFORM_DATA
+ int ret;
+
+ /* WL12xx WLAN Init */
+ ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
+ ret = platform_device_register(&omap3evm_wlan_regulator);
+ if (ret)
+ pr_err("error registering wl12xx device: %d\n", ret);
+#endif
+}
+
static void __init omap3_evm_init(void)
{
omap3_evm_get_revision();
@@ -665,13 +680,7 @@ static void __init omap3_evm_init(void)
omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
omap3evm_init_smsc911x();
omap3_evm_display_init();
-
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
- /* WL12xx WLAN Init */
- if (wl12xx_set_platform_data(&omap3evm_wlan_data))
- pr_err("error setting wl12xx data\n");
- platform_device_register(&omap3evm_wlan_regulator);
-#endif
+ omap3_evm_wl12xx_init();
}
MACHINE_START(OMAP3EVM, "OMAP3 EVM")
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index b7779c206a90..28fc271f7031 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -488,13 +488,15 @@ void omap4_panda_display_init(void)
static void __init omap4_panda_init(void)
{
int package = OMAP_PACKAGE_CBS;
+ int ret;
if (omap_rev() == OMAP4430_REV_ES1_0)
package = OMAP_PACKAGE_CBL;
omap4_mux_init(board_mux, NULL, package);
- if (wl12xx_set_platform_data(&omap_panda_wlan_data))
- pr_err("error setting wl12xx data\n");
+ ret = wl12xx_set_platform_data(&omap_panda_wlan_data);
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
omap4_panda_i2c_init();
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 8d7ce11cfeaf..c126461836ac 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -296,8 +296,10 @@ static void enable_board_wakeup_source(void)
void __init zoom_peripherals_init(void)
{
- if (wl12xx_set_platform_data(&omap_zoom_wlan_data))
- pr_err("error setting wl12xx data\n");
+ int ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
+
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
omap_i2c_init();
platform_device_register(&omap_vwlan_device);
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index ad0adb5a1e0e..b40c28895298 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -293,8 +293,8 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
}
}
-static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
- struct omap_mmc_platform_data *mmc)
+static int omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ struct omap_mmc_platform_data *mmc)
{
char *hc_name;
@@ -430,7 +430,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16
-void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
+void omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
@@ -487,7 +487,7 @@ done:
kfree(mmc_data);
}
-void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
+void omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
u32 reg;
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index e1cc75d1a57a..fb8bc9fa43b1 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -100,8 +100,8 @@ void omap_mux_write_array(struct omap_mux_partition *partition,
static char *omap_mux_options;
-static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
- int gpio, int val)
+static int _omap_mux_init_gpio(struct omap_mux_partition *partition,
+ int gpio, int val)
{
struct omap_mux_entry *e;
struct omap_mux *gpio_mux = NULL;
@@ -145,7 +145,7 @@ static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
return 0;
}
-int __init omap_mux_init_gpio(int gpio, int val)
+int omap_mux_init_gpio(int gpio, int val)
{
struct omap_mux_partition *partition;
int ret;
@@ -159,9 +159,9 @@ int __init omap_mux_init_gpio(int gpio, int val)
return -ENODEV;
}
-static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
- const char *muxname,
- struct omap_mux **found_mux)
+static int _omap_mux_get_by_name(struct omap_mux_partition *partition,
+ const char *muxname,
+ struct omap_mux **found_mux)
{
struct omap_mux *mux = NULL;
struct omap_mux_entry *e;
@@ -240,7 +240,7 @@ omap_mux_get_by_name(const char *muxname,
return -ENODEV;
}
-int __init omap_mux_init_signal(const char *muxname, int val)
+int omap_mux_init_signal(const char *muxname, int val)
{
struct omap_mux_partition *partition = NULL;
struct omap_mux *mux = NULL;
@@ -1094,8 +1094,8 @@ static void omap_mux_init_package(struct omap_mux *superset,
omap_mux_package_init_balls(package_balls, superset);
}
-static void omap_mux_init_signals(struct omap_mux_partition *partition,
- struct omap_board_mux *board_mux)
+static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
+ struct omap_board_mux *board_mux)
{
omap_mux_set_cmdline_signals();
omap_mux_write_array(partition, board_mux);
@@ -1109,8 +1109,8 @@ static void omap_mux_init_package(struct omap_mux *superset,
{
}
-static void omap_mux_init_signals(struct omap_mux_partition *partition,
- struct omap_board_mux *board_mux)
+static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
+ struct omap_board_mux *board_mux)
{
}
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index b13ef7ef5ef4..503ac777a2ba 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -18,6 +18,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+ __CPUINIT
/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 5192cabb40ed..eba6cd3816f5 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1517,8 +1517,8 @@ static int _enable(struct omap_hwmod *oh)
if (oh->_state != _HWMOD_STATE_INITIALIZED &&
oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_DISABLED) {
- WARN(1, "omap_hwmod: %s: enabled state can only be entered "
- "from initialized, idle, or disabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: enabled state can only be entered from initialized, idle, or disabled state\n",
+ oh->name);
return -EINVAL;
}
@@ -1600,8 +1600,8 @@ static int _idle(struct omap_hwmod *oh)
pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->_state != _HWMOD_STATE_ENABLED) {
- WARN(1, "omap_hwmod: %s: idle state can only be entered from "
- "enabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n",
+ oh->name);
return -EINVAL;
}
@@ -1682,8 +1682,8 @@ static int _shutdown(struct omap_hwmod *oh)
if (oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_ENABLED) {
- WARN(1, "omap_hwmod: %s: disabled state can only be entered "
- "from idle, or enabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n",
+ oh->name);
return -EINVAL;
}
@@ -2240,8 +2240,8 @@ void omap_hwmod_ocp_barrier(struct omap_hwmod *oh)
BUG_ON(!oh);
if (!oh->class->sysc || !oh->class->sysc->sysc_flags) {
- WARN(1, "omap_device: %s: OCP barrier impossible due to "
- "device configuration\n", oh->name);
+ WARN(1, "omap_device: %s: OCP barrier impossible due to device configuration\n",
+ oh->name);
return;
}
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 33dd655e6aab..a1d6154dc120 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -19,6 +19,7 @@
#include "common.h"
#include <plat/cpu.h>
+#include <plat/irqs.h>
#include <plat/prcm.h>
#include "vp.h"
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 247d89478f24..f590afc1f673 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -107,18 +107,18 @@ static void omap_uart_set_noidle(struct platform_device *pdev)
omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
}
-static void omap_uart_set_forceidle(struct platform_device *pdev)
+static void omap_uart_set_smartidle(struct platform_device *pdev)
{
struct omap_device *od = to_omap_device(pdev);
- omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE);
+ omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART);
}
#else
static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
{}
static void omap_uart_set_noidle(struct platform_device *pdev) {}
-static void omap_uart_set_forceidle(struct platform_device *pdev) {}
+static void omap_uart_set_smartidle(struct platform_device *pdev) {}
#endif /* CONFIG_PM */
#ifdef CONFIG_OMAP_MUX
@@ -349,7 +349,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,
omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
omap_up.flags = UPF_BOOT_AUTOCONF;
omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
- omap_up.set_forceidle = omap_uart_set_forceidle;
+ omap_up.set_forceidle = omap_uart_set_smartidle;
omap_up.set_noidle = omap_uart_set_noidle;
omap_up.enable_wakeup = omap_uart_enable_wakeup;
omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index 031d116fbf10..175b7d86d86a 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -247,7 +247,7 @@ static void __init omap4_vc_init_channel(struct voltagedomain *voltdm)
* omap_vc_i2c_init - initialize I2C interface to PMIC
* @voltdm: voltage domain containing VC data
*
- * Use PMIC supplied seetings for I2C high-speed mode and
+ * Use PMIC supplied settings for I2C high-speed mode and
* master code (if set) and program the VC I2C configuration
* register.
*
@@ -265,8 +265,8 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
if (initialized) {
if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
- pr_warn("%s: I2C config for all channels must match.",
- __func__);
+ pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
+ __func__, voltdm->name, i2c_high_speed);
return;
}
@@ -292,9 +292,7 @@ void __init omap_vc_init_channel(struct voltagedomain *voltdm)
u32 val;
if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
- pr_err("%s: PMIC info requried to configure vc for"
- "vdd_%s not populated.Hence cannot initialize vc\n",
- __func__, voltdm->name);
+ pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
return;
}
diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
index 807391d84a9d..0df88820978d 100644
--- a/arch/arm/mach-omap2/vp.c
+++ b/arch/arm/mach-omap2/vp.c
@@ -41,6 +41,11 @@ void __init omap_vp_init(struct voltagedomain *voltdm)
u32 val, sys_clk_rate, timeout, waittime;
u32 vddmin, vddmax, vstepmin, vstepmax;
+ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
+ pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
+ return;
+ }
+
if (!voltdm->read || !voltdm->write) {
pr_err("%s: No read/write API for accessing vdd_%s regs\n",
__func__, voltdm->name);
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 07c4bc8ea0a4..7a24d39661f0 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -54,9 +54,15 @@ loop1:
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
+#ifdef CONFIG_PREEMPT
+ save_and_disable_irqs r9 @ make cssr&csidr read atomic
+#endif
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sync the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+#ifdef CONFIG_PREEMPT
+ restore_irqs_notrace r9
+#endif
and r2, r1, #7 @ extract the length of the cache lines
add r2, r2, #4 @ add 4 (line length offset)
ldr r4, =0x3ff
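
The issue being fixed: selecting a cache level via CSSELR and then reading CCSIDR is a two-step per-CPU sequence, and with CONFIG_PREEMPT a context switch (whose own cache maintenance reprograms CSSELR) can land between the two steps. Expressed as a standalone C model - every function here is an illustrative stand-in, not a kernel API:

#include <stdio.h>

static unsigned long fake_csselr;                 /* models the selector reg */

static void write_csselr(unsigned long level) { fake_csselr = level; }
static unsigned long read_ccsidr(void) { return 0x7003e01aUL + fake_csselr; }
static unsigned long fake_irq_save(void) { return 0; } /* save_and_disable_irqs */
static void fake_irq_restore(unsigned long f) { (void)f; }

static unsigned long ccsidr_for_level(unsigned long level)
{
	unsigned long flags, val;

	/* Nothing may run between the selector write and the dependent
	 * read; disabling interrupts makes the pair atomic on this CPU. */
	flags = fake_irq_save();
	write_csselr(level);
	val = read_ccsidr();    /* the asm puts an isb between these two */
	fake_irq_restore(flags);
	return val;
}

int main(void)
{
	printf("ccsidr(L1)=%#lx\n", ccsidr_for_level(0));
	return 0;
}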
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 2156e077859b..1acf65026773 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -24,10 +24,6 @@ CONFIG_PPC_SPLPAR=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_DTL=y
-CONFIG_PPC_ISERIES=y
-CONFIG_VIODASD=y
-CONFIG_VIOCD=m
-CONFIG_VIOTAPE=m
CONFIG_PPC_MAPLE=y
CONFIG_PPC_PASEMI=y
CONFIG_PPC_PASEMI_IOMMU=y
@@ -259,7 +255,6 @@ CONFIG_PASEMI_MAC=y
CONFIG_MLX4_EN=m
CONFIG_QLGE=m
CONFIG_BE2NET=m
-CONFIG_ISERIES_VETH=m
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 43268f15004e..6d422979ebaf 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -142,6 +142,11 @@ static inline const char *eeh_pci_name(struct pci_dev *pdev)
return pdev ? pci_name(pdev) : "<null>";
}
+static inline const char *eeh_driver_name(struct pci_dev *pdev)
+{
+ return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
+}
+
#endif /* CONFIG_EEH */
#else /* CONFIG_PCI */
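
The new helper tolerates pdev->driver being NULL, which is possible while a device is unbound or mid-recovery; the eeh.c hunk further down moves its printk()s onto it. A standalone illustration with reduced stand-in structs:

#include <stdio.h>

/* Minimal stand-ins for struct pci_dev / struct pci_driver. */
struct pci_driver { const char *name; };
struct pci_dev { struct pci_driver *driver; };

static const char *eeh_driver_name(struct pci_dev *pdev)
{
	return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
}

int main(void)
{
	struct pci_dev unbound = { .driver = NULL };

	/* The old code's dev->driver->name would fault here. */
	printf("driver=%s\n", eeh_driver_name(&unbound));
	return 0;
}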
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 78a205162fd7..84cc7840cd18 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -83,8 +83,18 @@ struct pt_regs {
#ifndef __ASSEMBLY__
-#define instruction_pointer(regs) ((regs)->nip)
-#define user_stack_pointer(regs) ((regs)->gpr[1])
+#define GET_IP(regs) ((regs)->nip)
+#define GET_USP(regs) ((regs)->gpr[1])
+#define GET_FP(regs) (0)
+#define SET_FP(regs, val)
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#define profile_pc profile_pc
+#endif
+
+#include <asm-generic/ptrace.h>
+
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
@@ -99,12 +109,6 @@ static inline long regs_return_value(struct pt_regs *regs)
return -regs->gpr[3];
}
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
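
Context for the GET_IP()/GET_USP() switch: include/asm-generic/ptrace.h, which the hunk now includes, builds the generic accessors out of whatever the architecture defines, roughly like this (a paraphrase with a stand-in pt_regs layout, not the verbatim header):

/* Paraphrase of include/asm-generic/ptrace.h; each architecture
 * supplies its own pt_regs - this layout is a stand-in. */
struct pt_regs { unsigned long pc; };

#ifndef GET_IP
#define GET_IP(regs)	((regs)->pc)
#endif

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return GET_IP(regs);
}

/* Falls back to the instruction pointer unless the arch provides its
 * own - which is why the hunk defines profile_pc under CONFIG_SMP
 * before including the generic header. */
#ifndef profile_pc
#define profile_pc(regs) instruction_pointer(regs)
#endif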
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index d4be7bb3dbdf..3844ca7c5099 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -775,7 +775,7 @@ program_check_common:
EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
+ DISABLE_INTS
bl .program_check_exception
b .ret_from_except
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 701d4aceb4f4..01e2877e8e04 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -118,10 +118,14 @@ static inline notrace void set_soft_enabled(unsigned long enable)
static inline notrace void decrementer_check_overflow(void)
{
u64 now = get_tb_or_rtc();
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ u64 *next_tb;
+
+ preempt_disable();
+ next_tb = &__get_cpu_var(decrementers_next_tb);
if (now >= *next_tb)
set_dec(1);
+ preempt_enable();
}
notrace void arch_local_irq_restore(unsigned long en)
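
The bug: __get_cpu_var() forms a pointer into the current CPU's per-CPU area, and in a preemptible section the task can migrate after the pointer is formed, leaving it aimed at another CPU's decrementers_next_tb. The fix brackets both the pointer's formation and its use. A standalone model of the hazard (the per-CPU array, preempt_disable()/preempt_enable() and set_dec() are stand-ins):

#include <stdio.h>

static unsigned long long next_tb_per_cpu[2];
static int cur_cpu;     /* would change if the task migrated */

static void preempt_disable(void) { /* pins cur_cpu in the real kernel */ }
static void preempt_enable(void)  { }
static void set_dec(int v)        { printf("set_dec(%d)\n", v); }

static void decrementer_check_overflow(unsigned long long now)
{
	unsigned long long *next_tb;

	preempt_disable();                      /* no migration from here... */
	next_tb = &next_tb_per_cpu[cur_cpu];    /* form the per-CPU pointer  */
	if (now >= *next_tb)                    /* ...and dereference it     */
		set_dec(1);
	preempt_enable();                       /* ...to here */
}

int main(void)
{
	decrementer_check_overflow(1);
	return 0;
}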
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 10a140f82cb8..64483fde95c6 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
+ unsigned long val;
if (!event->hw.idx || !event->hw.sample_period)
return;
@@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
- write_pmc(event->hw.idx, left);
+
+ val = 0;
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+
+ write_pmc(event->hw.idx, val);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
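
The arithmetic behind the fix: POWER PMCs are 32-bit up-counters that raise the performance-monitor exception when bit 31 becomes set, so to fire after 'left' more events the counter must start at 0x80000000 - left; writing 'left' itself, as the old code did, programmed an almost-full period instead. A standalone check of the conversion:

#include <stdio.h>
#include <stdint.h>

/* Convert "events remaining" into a PMC start value, as in power_pmu_start(). */
static uint32_t pmc_value_for(int64_t left)
{
	uint32_t val = 0;

	if (left < 0x80000000LL)
		val = (uint32_t)(0x80000000LL - left);
	return val;
}

int main(void)
{
	int64_t left = 1000;
	uint32_t val = pmc_value_for(left);

	/* The counter reaches 0x80000000 (interrupt) after exactly
	 * 'left' increments: 0x80000000 - val == left. */
	printf("start=%#x, events to overflow=%lld\n",
	       val, (long long)(0x80000000LL - (int64_t)val));
	return 0;
}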
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ebe5766781aa..d817ab018486 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -566,12 +566,12 @@ static void show_instructions(struct pt_regs *regs)
*/
if (!__kernel_text_address(pc) ||
__get_user(instr, (unsigned int __user *)pc)) {
- printk("XXXXXXXX ");
+ printk(KERN_CONT "XXXXXXXX ");
} else {
if (regs->nip == pc)
- printk("<%08x> ", instr);
+ printk(KERN_CONT "<%08x> ", instr);
else
- printk("%08x ", instr);
+ printk(KERN_CONT "%08x ", instr);
}
pc += sizeof(int);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 517b1d8f455b..9f843cdfee9e 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -716,7 +716,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
int cpu;
slb_set_size(SLB_MIN_SIZE);
- stop_topology_update();
printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -732,7 +731,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
rc = atomic_read(&data->error);
atomic_set(&data->error, rc);
- start_topology_update();
pSeries_coalesce_init();
if (wake_when_done) {
@@ -846,6 +844,7 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
atomic_set(&data.error, 0);
data.token = rtas_token("ibm,suspend-me");
data.complete = &done;
+ stop_topology_update();
/* Call function on all CPUs. One of us will make the
* rtas call
@@ -858,6 +857,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
if (atomic_read(&data.error) != 0)
printk(KERN_ERR "Error doing global join\n");
+ start_topology_update();
+
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index a70bc1e385eb..f92b9ef7340e 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -52,32 +52,38 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
{
- unsigned int id;
+ unsigned long flags;
+ unsigned int id, rc;
+
+ spin_lock_irqsave(&phb->lock, flags);
- spin_lock(&phb->lock);
id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
if (id >= phb->msi_count && phb->msi_next)
id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
if (id >= phb->msi_count) {
- spin_unlock(&phb->lock);
- return 0;
+ rc = 0;
+ goto out;
}
__set_bit(id, phb->msi_map);
- spin_unlock(&phb->lock);
- return id + phb->msi_base;
+ rc = id + phb->msi_base;
+out:
+ spin_unlock_irqrestore(&phb->lock, flags);
+ return rc;
}
static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
{
+ unsigned long flags;
unsigned int id;
if (WARN_ON(hwirq < phb->msi_base ||
hwirq >= (phb->msi_base + phb->msi_count)))
return;
id = hwirq - phb->msi_base;
- spin_lock(&phb->lock);
+
+ spin_lock_irqsave(&phb->lock, flags);
__clear_bit(id, phb->msi_map);
- spin_unlock(&phb->lock);
+ spin_unlock_irqrestore(&phb->lock, flags);
}
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
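
Besides switching to spin_lock_irqsave() (the allocator can be reached from contexts with interrupts disabled) the rewrite funnels both exits through a single unlock. The search itself is a round-robin bitmap scan: start at msi_next, wrap to 0 once. A single-threaded standalone model of that scan - the kernel version runs under phb->lock and uses find_next_zero_bit(); how msi_next advances is not shown in the hunk, so the update below is an assumption of this model:

#include <stdio.h>

#define MSI_COUNT 8

static int msi_map[MSI_COUNT];
static unsigned int msi_next;

static unsigned int find_zero_from(unsigned int start)
{
	unsigned int i;

	for (i = start; i < MSI_COUNT; i++)
		if (!msi_map[i])
			return i;
	return MSI_COUNT;                       /* "not found" sentinel */
}

static int get_one_msi(void)
{
	unsigned int id = find_zero_from(msi_next);

	if (id >= MSI_COUNT && msi_next)        /* wrap around once */
		id = find_zero_from(0);
	if (id >= MSI_COUNT)
		return -1;                      /* pool exhausted */
	msi_map[id] = 1;
	msi_next = id + 1;                      /* assumed round-robin update */
	return (int)id;
}

int main(void)
{
	printf("%d %d %d\n", get_one_msi(), get_one_msi(), get_one_msi());
	return 0;
}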
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 565869022e3d..c0b40af4ce4f 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -551,9 +551,9 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
printk (KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
pdn->eeh_check_count, location,
- dev->driver->name, eeh_pci_name(dev));
+ eeh_driver_name(dev), eeh_pci_name(dev));
printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n",
- dev->driver->name);
+ eeh_driver_name(dev));
dump_stack();
}
goto dn_unlock;
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index b84a8b2238dd..47226e04126d 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -24,6 +24,7 @@
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/rtas.h>
+#include <asm/topology.h>
static u64 stream_id;
static struct device suspend_dev;
@@ -138,8 +139,11 @@ static ssize_t store_hibernate(struct device *dev,
ssleep(1);
} while (rc == -EAGAIN);
- if (!rc)
+ if (!rc) {
+ stop_topology_update();
rc = pm_suspend(PM_SUSPEND_MEM);
+ start_topology_update();
+ }
stream_id = 0;
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 576874392543..97fe82ee8633 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -346,7 +346,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
* For the moment only implement delivery to all cpus or one cpu.
* Get current irq_server for the given irq
*/
- ret = cache_hwirq_map(ics, d->irq, cpumask);
+ ret = cache_hwirq_map(ics, hw_irq, cpumask);
if (ret == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index e0262cd0e2d3..d24b3acf858e 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -468,15 +468,15 @@ static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
#define DUMP_REG(x) \
pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
-#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS
- /* WSP DD1 has a bogus class code by default in the PCI-E
- * root complex's built-in P2P bridge */
+ /*
+	 * Some WSP variants have a bogus class code by default in the PCI-E
+ * root complex's built-in P2P bridge
+ */
val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
(val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
-#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
/* XXX Disable TCE caching, it doesn't work on DD1 */
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 30eb17ecad49..6073288fed29 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -385,26 +385,36 @@ static void __init setup_pci_cmd(struct pci_controller *hose)
void fsl_pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
- int i;
-
- if ((bus->parent == hose->bus) &&
- ((fsl_pcie_bus_fixup &&
- early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) ||
- (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)))
- {
- for (i = 0; i < 4; ++i) {
+ int i, is_pcie = 0, no_link;
+
+ /* The root complex bridge comes up with bogus resources,
+ * we copy the PHB ones in.
+ *
+ * With the current generic PCI code, the PHB bus no longer
+ * has bus->resource[0..4] set, so things are a bit more
+ * tricky.
+ */
+
+ if (fsl_pcie_bus_fixup)
+ is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
+ no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
+
+ if (bus->parent == hose->bus && (is_pcie || no_link)) {
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
struct resource *res = bus->resource[i];
- struct resource *par = bus->parent->resource[i];
- if (res) {
- res->start = 0;
- res->end = 0;
- res->flags = 0;
- }
- if (res && par) {
- res->start = par->start;
- res->end = par->end;
- res->flags = par->flags;
- }
+ struct resource *par;
+
+ if (!res)
+ continue;
+ if (i == 0)
+ par = &hose->io_resource;
+ else if (i < 4)
+ par = &hose->mem_resources[i-1];
+			else
+				par = NULL;
+
+ res->start = par ? par->start : 0;
+ res->end = par ? par->end : 0;
+ res->flags = par ? par->flags : 0;
}
}
}
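
The index mapping replaces the old bus->parent->resource[i] lookup: for a bridge, resource slot 0 is the I/O window and slots 1-3 are the memory windows, so the PHB's io_resource and mem_resources[] are the correct parents. A standalone sketch of just that selection (the struct layouts are reduced stand-ins):

#include <stdio.h>

struct resource { unsigned long start, end, flags; };
struct host {
	struct resource io_resource;
	struct resource mem_resources[3];
};

/* Pick the PHB resource backing bridge resource slot i (0 = I/O,
 * 1..3 = memory windows), as in the fsl_pcibios_fixup_bus() hunk. */
static struct resource *parent_res(struct host *hose, int i)
{
	if (i == 0)
		return &hose->io_resource;
	if (i < 4)
		return &hose->mem_resources[i - 1];
	return NULL;
}

int main(void)
{
	struct host hose = { .io_resource = { 0x1000, 0x1fff, 1 } };
	struct resource *par = parent_res(&hose, 0);

	printf("io window: %#lx-%#lx\n", par->start, par->end);
	return 0;
}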
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 6919e936345b..a850b4d8d14d 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,8 +29,8 @@ extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
-extern asmlinkage void math_state_restore(void);
-extern void __math_state_restore(void);
+extern void __math_state_restore(struct task_struct *);
+extern void math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern user_regset_active_fn fpregs_active, xfpregs_active;
@@ -212,19 +212,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
#endif /* CONFIG_X86_64 */
-/* We need a safe address that is cheap to find and that is already
- in L1 during context switch. The best choices are unfortunately
- different for UP and SMP */
-#ifdef CONFIG_SMP
-#define safe_address (__per_cpu_offset[0])
-#else
-#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
-#endif
-
/*
- * These must be called with preempt disabled
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
*/
-static inline void fpu_save_init(struct fpu *fpu)
+static inline int fpu_save_init(struct fpu *fpu)
{
if (use_xsave()) {
fpu_xsave(fpu);
@@ -233,33 +225,33 @@ static inline void fpu_save_init(struct fpu *fpu)
* xsave header may indicate the init state of the FP.
*/
if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
- return;
+ return 1;
} else if (use_fxsr()) {
fpu_fxsave(fpu);
} else {
asm volatile("fnsave %[fx]; fwait"
: [fx] "=m" (fpu->state->fsave));
- return;
+ return 0;
}
- if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+ /*
+ * If exceptions are pending, we need to clear them so
+ * that we don't randomly get exceptions later.
+ *
+ * FIXME! Is this perhaps only true for the old-style
+ * irq13 case? Maybe we could leave the x87 state
+ * intact otherwise?
+ */
+ if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
asm volatile("fnclex");
-
- /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
- is pending. Clear the x87 state here by setting it to fixed
- values. safe_address is a random variable that should be in L1 */
- alternative_input(
- ASM_NOP8 ASM_NOP2,
- "emms\n\t" /* clear stack tags */
- "fildl %P[addr]", /* set F?P to defined value */
- X86_FEATURE_FXSAVE_LEAK,
- [addr] "m" (safe_address));
+ return 0;
+ }
+ return 1;
}
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline int __save_init_fpu(struct task_struct *tsk)
{
- fpu_save_init(&tsk->thread.fpu);
- task_thread_info(tsk)->status &= ~TS_USEDFPU;
+ return fpu_save_init(&tsk->thread.fpu);
}
static inline int fpu_fxrstor_checking(struct fpu *fpu)
@@ -281,39 +273,185 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
}
/*
- * Signal frame handlers...
+ * Software FPU state helpers. Careful: these need to
+ * be preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
*/
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+ return tsk->thread.has_fpu;
+}
-static inline void __unlazy_fpu(struct task_struct *tsk)
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
- if (task_thread_info(tsk)->status & TS_USEDFPU) {
- __save_init_fpu(tsk);
- stts();
- } else
- tsk->fpu_counter = 0;
+ tsk->thread.has_fpu = 0;
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+ tsk->thread.has_fpu = 1;
}
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+ __thread_clear_has_fpu(tsk);
+ stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+ clts();
+ __thread_set_has_fpu(tsk);
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ * - switch_fpu_prepare() saves the old state and
+ * sets the new state of the CR0.TS bit. This is
+ * done within the context of the old process.
+ *
+ * - switch_fpu_finish() restores the new state as
+ * necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+/*
+ * FIXME! We could do a totally lazy restore, but we need to
+ * add a per-cpu "this was the task that last touched the FPU
+ * on this CPU" variable, and the task needs to have a "I last
+ * touched the FPU on this CPU" and check them.
+ *
+ * We don't do that yet, so "fpu_lazy_restore()" always returns
+ * false, but some day..
+ */
+#define fpu_lazy_restore(tsk) (0)
+#define fpu_lazy_state_intact(tsk) do { } while (0)
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
+{
+ fpu_switch_t fpu;
+
+ fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+ if (__thread_has_fpu(old)) {
+ if (__save_init_fpu(old))
+ fpu_lazy_state_intact(old);
+ __thread_clear_has_fpu(old);
+ old->fpu_counter++;
+
+ /* Don't change CR0.TS if we just switch! */
+ if (fpu.preload) {
+ __thread_set_has_fpu(new);
+ prefetch(new->thread.fpu.state);
+ } else
+ stts();
+ } else {
+ old->fpu_counter = 0;
+ if (fpu.preload) {
+ if (fpu_lazy_restore(new))
+ fpu.preload = 0;
+ else
+ prefetch(new->thread.fpu.state);
+ __thread_fpu_begin(new);
+ }
+ }
+ return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+ if (fpu.preload)
+ __math_state_restore(new);
+}
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
+
static inline void __clear_fpu(struct task_struct *tsk)
{
- if (task_thread_info(tsk)->status & TS_USEDFPU) {
+ if (__thread_has_fpu(tsk)) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
- task_thread_info(tsk)->status &= ~TS_USEDFPU;
- stts();
+ __thread_fpu_end(tsk);
}
}
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have fpu (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+ return !__thread_has_fpu(current) &&
+ (read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+ struct pt_regs *regs = get_irq_regs();
+ return regs && user_mode_vm(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+static inline bool irq_fpu_usable(void)
+{
+ return !in_interrupt() ||
+ interrupted_user_mode() ||
+ interrupted_kernel_fpu_idle();
+}
+
static inline void kernel_fpu_begin(void)
{
- struct thread_info *me = current_thread_info();
+ struct task_struct *me = current;
+
+ WARN_ON_ONCE(!irq_fpu_usable());
preempt_disable();
- if (me->status & TS_USEDFPU)
- __save_init_fpu(me->task);
- else
+ if (__thread_has_fpu(me)) {
+ __save_init_fpu(me);
+ __thread_clear_has_fpu(me);
+ /* We do 'stts()' in kernel_fpu_end() */
+ } else
clts();
}
@@ -323,14 +461,6 @@ static inline void kernel_fpu_end(void)
preempt_enable();
}
-static inline bool irq_fpu_usable(void)
-{
- struct pt_regs *regs;
-
- return !in_interrupt() || !(regs = get_irq_regs()) || \
- user_mode(regs) || (read_cr0() & X86_CR0_TS);
-}
-
/*
* Some instructions like VIA's padlock instructions generate a spurious
* DNA fault but don't modify SSE registers. And these instructions
@@ -363,20 +493,64 @@ static inline void irq_ts_restore(int TS_state)
}
/*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ *
+ * The actual user_fpu_begin/end() functions
+ * need to be preemption-safe, though.
+ *
+ * NOTE! user_fpu_end() must be used only after you
+ * have saved the FP state, and user_fpu_begin() must
+ * be used only immediately before restoring it.
+ * These functions do not do any save/restore on
+ * their own.
+ */
+static inline int user_has_fpu(void)
+{
+ return __thread_has_fpu(current);
+}
+
+static inline void user_fpu_end(void)
+{
+ preempt_disable();
+ __thread_fpu_end(current);
+ preempt_enable();
+}
+
+static inline void user_fpu_begin(void)
+{
+ preempt_disable();
+ if (!user_has_fpu())
+ __thread_fpu_begin(current);
+ preempt_enable();
+}
+
+/*
* These disable preemption on their own and are safe
*/
static inline void save_init_fpu(struct task_struct *tsk)
{
+ WARN_ON_ONCE(!__thread_has_fpu(tsk));
preempt_disable();
__save_init_fpu(tsk);
- stts();
+ __thread_fpu_end(tsk);
preempt_enable();
}
static inline void unlazy_fpu(struct task_struct *tsk)
{
preempt_disable();
- __unlazy_fpu(tsk);
+ if (__thread_has_fpu(tsk)) {
+ __save_init_fpu(tsk);
+ __thread_fpu_end(tsk);
+ } else
+ tsk->fpu_counter = 0;
preempt_enable();
}
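
The new interface splits the FPU hand-off into two halves; the comment block above documents the contract, and the process_32.c/process_64.c hunks below are the real call sites. A condensed kernel-context sketch of how they pair up (the extern declarations stand in for the inline definitions above):

struct task_struct;
typedef struct { int preload; } fpu_switch_t;
extern fpu_switch_t switch_fpu_prepare(struct task_struct *old,
				       struct task_struct *new);
extern void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu);

struct task_struct *__switch_to_sketch(struct task_struct *prev,
				       struct task_struct *next)
{
	/* Stage 1: save prev's FPU state (if it owned the FPU) and set
	 * CR0.TS for next - done while prev is still "current". */
	fpu_switch_t fpu = switch_fpu_prepare(prev, next);

	/* ... reload stack pointer, segments, TLS, etc. here ... */

	/* Stage 2: if we decided to preload, the FPU registers are
	 * restored only now, after the batched state updates. */
	switch_fpu_finish(next, fpu);
	return prev;
}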
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c26931..f7c89e231c6c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -454,6 +454,7 @@ struct thread_struct {
unsigned long trap_no;
unsigned long error_code;
/* floating point and extended processor state */
+ unsigned long has_fpu;
struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index bc817cd8b443..cfd8144d5527 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -247,8 +247,6 @@ static inline struct thread_info *current_thread_info(void)
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
-#define TS_USEDFPU 0x0001 /* FPU was used by this task
- this quantum (SMP) */
#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
#define TS_POLLING 0x0004 /* idle task polling need_resched,
skip sending interrupt */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 73da6b64f5b7..d6bd49faa40c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -439,7 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event)
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
cpuc->pebs_enabled |= 1ULL << hwc->idx;
- WARN_ON_ONCE(cpuc->enabled);
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_enable(event);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 3fab3de3ce96..47a7e63bfe54 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -72,8 +72,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
if (!x86_pmu.lbr_nr)
return;
- WARN_ON_ONCE(cpuc->enabled);
-
/*
* Reset the LBR stack if we changed task context to
* avoid data leaks.
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f58cda..80bfe1ab0031 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -299,22 +299,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
- bool preload_fpu;
+ fpu_switch_t fpu;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
- /*
- * If the task has used fpu the last 5 timeslices, just do a full
- * restore of the math state immediately to avoid the trap; the
- * chances of needing FPU soon are obviously high now
- */
- preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
- __unlazy_fpu(prev_p);
-
- /* we're going to use this soon, after a few expensive things */
- if (preload_fpu)
- prefetch(next->fpu.state);
+ fpu = switch_fpu_prepare(prev_p, next_p);
/*
* Reload esp0.
@@ -354,11 +343,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
- /* If we're going to preload the fpu context, make sure clts
- is run while we're batching the cpu state updates. */
- if (preload_fpu)
- clts();
-
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
@@ -368,15 +352,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
arch_end_context_switch(next_p);
- if (preload_fpu)
- __math_state_restore();
-
/*
* Restore %gs if needed (which is common)
*/
if (prev->gs | next->gs)
lazy_load_gs(next->gs);
+ switch_fpu_finish(next_p, fpu);
+
percpu_write(current_task, next_p);
return prev_p;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c87..1fd94bc4279d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -386,18 +386,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
unsigned fsindex, gsindex;
- bool preload_fpu;
+ fpu_switch_t fpu;
- /*
- * If the task has used fpu the last 5 timeslices, just do a full
- * restore of the math state immediately to avoid the trap; the
- * chances of needing FPU soon are obviously high now
- */
- preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
- /* we're going to use this soon, after a few expensive things */
- if (preload_fpu)
- prefetch(next->fpu.state);
+ fpu = switch_fpu_prepare(prev_p, next_p);
/*
* Reload esp0, LDT and the page table pointer:
@@ -427,13 +418,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
load_TLS(next, cpu);
- /* Must be after DS reload */
- __unlazy_fpu(prev_p);
-
- /* Make sure cpu is ready for new context */
- if (preload_fpu)
- clts();
-
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
@@ -474,6 +458,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
+ switch_fpu_finish(next_p, fpu);
+
/*
* Switch the PDA and FPU contexts.
*/
@@ -492,13 +478,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
- /*
- * Preload the FPU context, now that we've determined that the
- * task is likely to be using it.
- */
- if (preload_fpu)
- __math_state_restore();
-
return prev_p;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 482ec3af2067..77da5b475ad2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -571,25 +571,34 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
}
/*
- * __math_state_restore assumes that cr0.TS is already clear and the
- * fpu state is all ready for use. Used during context switch.
+ * This gets called with the process already owning the
+ * FPU state, and with CR0.TS cleared. It just needs to
+ * restore the FPU register state.
*/
-void __math_state_restore(void)
+void __math_state_restore(struct task_struct *tsk)
{
- struct thread_info *thread = current_thread_info();
- struct task_struct *tsk = thread->task;
+ /* We need a safe address that is cheap to find and that is already
+ in L1. We've just brought in "tsk->thread.has_fpu", so use that */
+#define safe_address (tsk->thread.has_fpu)
+
+ /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+ values. safe_address is a random variable that should be in L1 */
+ alternative_input(
+ ASM_NOP8 ASM_NOP2,
+ "emms\n\t" /* clear stack tags */
+ "fildl %P[addr]", /* set F?P to defined value */
+ X86_FEATURE_FXSAVE_LEAK,
+ [addr] "m" (safe_address));
/*
* Paranoid restore. send a SIGSEGV if we fail to restore the state.
*/
if (unlikely(restore_fpu_checking(tsk))) {
- stts();
+ __thread_fpu_end(tsk);
force_sig(SIGSEGV, tsk);
return;
}
-
- thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
- tsk->fpu_counter++;
}
/*
@@ -599,13 +608,12 @@ void __math_state_restore(void)
* Careful.. There are problems with IBM-designed IRQ13 behaviour.
* Don't touch unless you *really* know how it works.
*
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
+ * Must be called with kernel preemption disabled (e.g. with local
+ * interrupts disabled, as is the case in do_device_not_available).
*/
-asmlinkage void math_state_restore(void)
+void math_state_restore(void)
{
- struct thread_info *thread = current_thread_info();
- struct task_struct *tsk = thread->task;
+ struct task_struct *tsk = current;
if (!tsk_used_math(tsk)) {
local_irq_enable();
@@ -622,9 +630,10 @@ asmlinkage void math_state_restore(void)
local_irq_disable();
}
- clts(); /* Allow maths ops (or we recurse) */
+ __thread_fpu_begin(tsk);
+ __math_state_restore(tsk);
- __math_state_restore();
+ tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
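
Putting the pieces together, this is the #NM ("device not available") path after the patch, as a kernel-context sketch; the extern declarations stand in for 'current' and the helpers defined in the hunks above, and bump_fpu_counter() is a stand-in for tsk->fpu_counter++:

struct task_struct;
extern struct task_struct *current_stub(void);        /* 'current' */
extern int  tsk_used_math(struct task_struct *);
extern int  init_fpu(struct task_struct *);
extern void __thread_fpu_begin(struct task_struct *); /* clts() + has_fpu = 1 */
extern void __math_state_restore(struct task_struct *);
extern void bump_fpu_counter(struct task_struct *);

void do_device_not_available_sketch(void)
{
	struct task_struct *tsk = current_stub();

	if (!tsk_used_math(tsk))
		init_fpu(tsk);           /* first use: allocate FPU state */

	__thread_fpu_begin(tsk);         /* own the FPU before touching it */
	__math_state_restore(tsk);       /* leak workaround + register restore */
	bump_fpu_counter(tsk);           /* feeds the preload heuristic
					    (fpu_counter > 5) at switch time */
}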
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a3911343976b..711091114119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
if (!fx)
return;
- BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
+ BUG_ON(__thread_has_fpu(tsk));
xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
if (!used_math())
return 0;
- if (task_thread_info(tsk)->status & TS_USEDFPU) {
+ if (user_has_fpu()) {
if (use_xsave())
err = xsave_user(buf);
else
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
if (err)
return err;
- task_thread_info(tsk)->status &= ~TS_USEDFPU;
- stts();
+ user_fpu_end();
} else {
sanitize_i387_state(tsk);
if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
return err;
}
- if (!(task_thread_info(current)->status & TS_USEDFPU)) {
- clts();
- task_thread_info(current)->status |= TS_USEDFPU;
- }
+ user_fpu_begin();
if (use_xsave())
err = restore_user_xstate(buf);
else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d29216c462b3..3b4c8d8ad906 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
- if (current_thread_info()->status & TS_USEDFPU)
+ if (__thread_has_fpu(current))
clts();
load_gdt(&__get_cpu_var(host_gdt));
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 492ade8c978e..d99346ea8fdb 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -374,7 +374,7 @@ int __init pci_xen_init(void)
int __init pci_xen_hvm_init(void)
{
- if (!xen_feature(XENFEAT_hvm_pirqs))
+ if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
return 0;
#ifdef CONFIG_ACPI
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 041d4fe9dfe4..501d4e0244ba 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -409,6 +409,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
cpu_bringup();
+ /*
+	 * Balance out the preempt calls - we are running in the cpu_idle
+	 * loop, which was entered at bootup from cpu_bringup_and_idle.
+	 * cpu_bringup_and_idle called cpu_bringup, which did a
+	 * preempt_disable(), so this preempt_enable() balances it out.
+ */
+ preempt_enable();
}
#else /* !CONFIG_HOTPLUG_CPU */