author    David S. Miller <davem@davemloft.net>  2015-03-10 04:38:02 +0100
committer David S. Miller <davem@davemloft.net>  2015-03-10 04:38:02 +0100
commit    3cef5c5b0b56f3f90b0e9ff8d3f8dc57f464cc14 (patch)
tree      02e95f15bd8a04071a9a36f534a92a066a8ce66a /arch
parent    net: bcmgenet: core changes for supporting multiple Rx queues (diff)
parent    Merge git://git.kernel.org/pub/scm/virt/kvm/kvm (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/cadence/macb.c

Overlapping changes in macb driver, mostly fixes and cleanups in 'net' overlapping with the integration of at91_ether into macb in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--arch/arc/include/asm/processor.h14
-rw-r--r--arch/arc/include/asm/stacktrace.h37
-rw-r--r--arch/arc/kernel/process.c23
-rw-r--r--arch/arc/kernel/stacktrace.c21
-rw-r--r--arch/arc/kernel/unaligned.c2
-rw-r--r--arch/arc/mm/fault.c12
-rw-r--r--arch/arm/include/asm/kvm_mmu.h2
-rw-r--r--arch/arm/kvm/arm.c2
-rw-r--r--arch/arm/kvm/trace.h10
-rw-r--r--arch/arm/mach-pxa/idp.c1
-rw-r--r--arch/arm/mach-pxa/lpd270.c2
-rw-r--r--arch/arm/mach-sa1100/neponset.c4
-rw-r--r--arch/arm/mach-sa1100/pleb.c2
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi4
-rw-r--r--arch/arm64/mm/pageattr.c5
-rw-r--r--arch/mips/kvm/tlb.c1
-rw-r--r--arch/mips/kvm/trace.h6
-rw-r--r--arch/powerpc/include/asm/iommu.h6
-rw-r--r--arch/powerpc/include/asm/irq_work.h9
-rw-r--r--arch/powerpc/kernel/iommu.c26
-rw-r--r--arch/powerpc/kernel/smp.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci.c26
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c2
-rw-r--r--arch/s390/include/asm/kvm_host.h12
-rw-r--r--arch/s390/include/asm/mmu_context.h2
-rw-r--r--arch/s390/include/asm/page.h11
-rw-r--r--arch/s390/kernel/jump_label.c12
-rw-r--r--arch/s390/kernel/module.c1
-rw-r--r--arch/s390/kernel/processor.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c68
-rw-r--r--arch/s390/kvm/kvm-s390.h3
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/pci/pci.c28
-rw-r--r--arch/s390/pci/pci_mmio.c17
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/xsave.h28
-rw-r--r--arch/x86/kernel/entry_64.S13
-rw-r--r--arch/x86/kvm/emulate.c3
-rw-r--r--arch/x86/kvm/lapic.c4
-rw-r--r--arch/x86/kvm/svm.c6
-rw-r--r--arch/x86/kvm/vmx.c23
-rw-r--r--arch/x86/pci/acpi.c11
42 files changed, 264 insertions, 204 deletions
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 4e547296831d..52312cb5dbe2 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -47,9 +47,6 @@ struct thread_struct {
/* Forward declaration, a strange C thing */
struct task_struct;
-/* Return saved PC of a blocked thread */
-unsigned long thread_saved_pc(struct task_struct *t);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
@@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t);
#define release_segments(mm) do { } while (0)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
* Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
-#define KSTK_ESP(tsk) (tsk->thread.ksp)
+#define TSK_K_ESP(tsk) (tsk->thread.ksp)
-#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
+#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \
sizeof(struct callee_regs) + off)))
-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
-#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
+#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
+
+#define thread_saved_pc(tsk) TSK_K_BLINK(tsk)
extern void start_thread(struct pt_regs * regs, unsigned long pc,
unsigned long usp);
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644
index 000000000000..b29b6064ea14
--- /dev/null
+++ b/arch/arc/include/asm/stacktrace.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk: NULL for current task, specific task otherwise
+ * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ * If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ * use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn: Callback invoked for each frame unwound
+ * Returns 0 to continue unwinding, -1 to stop
+ * @arg: Arg to callback
+ *
+ * Returns the address of first function in stack
+ *
+ * Semantics:
+ * - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL
+ * - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL
+ * - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+ struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *),
+ void *arg);
+
+#endif /* __ASM_STACKTRACE_H */
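
For context (not part of the patch being merged): the new header above documents the arc_unwind_core() callback contract, so a minimal, hypothetical consumer is sketched here. The callback name, frame limit, and printing are invented for illustration; only the arc_unwind_core() signature and its 0/-1 return convention come from the header's comment.

/* Illustrative sketch only -- not part of this merge. */
#include <linux/printk.h>
#include <asm/stacktrace.h>

/* Consumer callback: print each unwound frame, stop after 32 frames. */
static int print_frame(unsigned int addr, void *arg)
{
	unsigned int *nr = arg;

	pr_info("  [<%08x>] %pS\n", addr, (void *)(unsigned long)addr);
	return (++(*nr) < 32) ? 0 : -1;	/* 0: keep unwinding, -1: stop */
}

static void dump_current_stack_sketch(void)
{
	unsigned int nr = 0;

	/* tsk == NULL, regs == NULL: synchronous unwind of the current task */
	arc_unwind_core(NULL, NULL, print_frame, &nr);
}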
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index fdd89715d2d3..98c00a2d4dd9 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -192,29 +192,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
return 0;
}
-/*
- * API: expected by schedular Code: If thread is sleeping where is that.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- * So we hard code that anyways.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
- struct pt_regs *regs = task_pt_regs(t);
- unsigned long blink = 0;
-
- /*
- * If the thread being queried for in not itself calling this, then it
- * implies it is not executing, which in turn implies it is sleeping,
- * which in turn implies it got switched OUT by the schedular.
- * In that case, it's kernel mode blink can reliably retrieved as per
- * the picture above (right above pt_regs).
- */
- if (t != current && t->state != TASK_RUNNING)
- blink = *((unsigned int *)regs - 1);
-
- return blink;
-}
-
int elf_check_arch(const struct elf32_hdr *x)
{
unsigned int eflags;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 9ce47cfe2303..92320d6f737c 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -43,6 +43,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
struct pt_regs *regs,
struct unwind_frame_info *frame_info)
{
+ /*
+ * synchronous unwinding (e.g. dump_stack)
+ * - uses current values of SP and friends
+ */
if (tsk == NULL && regs == NULL) {
unsigned long fp, sp, blink, ret;
frame_info->task = current;
@@ -61,12 +65,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
frame_info->regs.r63 = ret;
frame_info->call_frame = 0;
} else if (regs == NULL) {
+ /*
+ * Asynchronous unwinding of sleeping task
+ * - Gets SP etc from task's pt_regs (saved bottom of kernel
+ * mode stack of task)
+ */
frame_info->task = tsk;
- frame_info->regs.r27 = KSTK_FP(tsk);
- frame_info->regs.r28 = KSTK_ESP(tsk);
- frame_info->regs.r31 = KSTK_BLINK(tsk);
+ frame_info->regs.r27 = TSK_K_FP(tsk);
+ frame_info->regs.r28 = TSK_K_ESP(tsk);
+ frame_info->regs.r31 = TSK_K_BLINK(tsk);
frame_info->regs.r63 = (unsigned int)__switch_to;
/* In the prologue of __switch_to, first FP is saved on stack
@@ -83,6 +92,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
frame_info->call_frame = 0;
} else {
+ /*
+ * Asynchronous unwinding of intr/exception
+ * - Just uses the pt_regs passed
+ */
frame_info->task = tsk;
frame_info->regs.r27 = regs->fp;
@@ -95,7 +108,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
#endif
-static noinline unsigned int
+notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
int (*consumer_fn) (unsigned int, void *), void *arg)
{
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 7ff5b5c183bb..74db59b6f392 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -12,6 +12,7 @@
*/
#include <linux/types.h>
+#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <asm/disasm.h>
@@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
}
}
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
return 0;
fault:
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 563cb27e37f5..6a2e006cbcce 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -14,6 +14,7 @@
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
+#include <linux/perf_event.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
@@ -139,13 +140,20 @@ good_area:
return;
}
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
if (likely(!(fault & VM_FAULT_ERROR))) {
if (flags & FAULT_FLAG_ALLOW_RETRY) {
/* To avoid updating stats twice for retry case */
- if (fault & VM_FAULT_MAJOR)
+ if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- else
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+ regs, address);
+ } else {
tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+ regs, address);
+ }
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 37ca2a4c6f09..bf0fe99e8ca9 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -207,7 +207,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
- VM_BUG_ON(size & PAGE_MASK);
+ VM_BUG_ON(size & ~PAGE_MASK);
if (!need_flush && !icache_is_pipt())
goto vipt_cache;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 07e7eb1d7ab6..5560f74f9eee 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -540,7 +540,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu->mode = OUTSIDE_GUEST_MODE;
kvm_guest_exit();
- trace_kvm_exit(*vcpu_pc(vcpu));
+ trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
/*
* We may have taken a host interrupt in HYP mode (ie
* while executing the guest). This interrupt is still
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 881874b1a036..6817664b46b8 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -25,18 +25,22 @@ TRACE_EVENT(kvm_entry,
);
TRACE_EVENT(kvm_exit,
- TP_PROTO(unsigned long vcpu_pc),
- TP_ARGS(vcpu_pc),
+ TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
+ TP_ARGS(exit_reason, vcpu_pc),
TP_STRUCT__entry(
+ __field( unsigned int, exit_reason )
__field( unsigned long, vcpu_pc )
),
TP_fast_assign(
+ __entry->exit_reason = exit_reason;
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+ TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
+ __entry->exit_reason,
+ __entry->vcpu_pc)
);
TRACE_EVENT(kvm_guest_fault,
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 7d8eab857a93..f6d02e4cbcda 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -36,6 +36,7 @@
#include <linux/platform_data/video-pxafb.h>
#include <mach/bitfield.h>
#include <linux/platform_data/mmc-pxamci.h>
+#include <linux/smc91x.h>
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 28da319d389f..eaee2c20b189 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -195,7 +195,7 @@ static struct resource smc91x_resources[] = {
};
struct smc91x_platdata smc91x_platdata = {
- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT;
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
};
static struct platform_device smc91x_device = {
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 7b0cd3172354..af868d258e66 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -268,8 +268,8 @@ static int neponset_probe(struct platform_device *dev)
.id = 0,
.res = smc91x_resources,
.num_res = ARRAY_SIZE(smc91x_resources),
- .data = &smc91c_platdata,
- .size_data = sizeof(smc91c_platdata),
+ .data = &smc91x_platdata,
+ .size_data = sizeof(smc91x_platdata),
};
int ret, irq;
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 696fd0fe4806..1525d7b5f1b7 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -54,7 +54,7 @@ static struct platform_device smc91x_device = {
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
- .platform_data = &smc91c_platdata,
+ .platform_data = &smc91x_platdata,
},
};
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index f1ad9c2ab2e9..a857794432d6 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -622,7 +622,7 @@
};
sgenet0: ethernet@1f210000 {
- compatible = "apm,xgene-enet";
+ compatible = "apm,xgene1-sgenet";
status = "disabled";
reg = <0x0 0x1f210000 0x0 0xd100>,
<0x0 0x1f200000 0x0 0Xc300>,
@@ -636,7 +636,7 @@
};
xgenet: ethernet@1f610000 {
- compatible = "apm,xgene-enet";
+ compatible = "apm,xgene1-xgenet";
status = "disabled";
reg = <0x0 0x1f610000 0x0 0xd100>,
<0x0 0x1f600000 0x0 0Xc300>,
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index bb0ea94c4ba1..1d3ec3ddd84b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -51,7 +51,10 @@ static int change_memory_common(unsigned long addr, int numpages,
WARN_ON_ONCE(1);
}
- if (!is_module_address(start) || !is_module_address(end - 1))
+ if (start < MODULES_VADDR || start >= MODULES_END)
+ return -EINVAL;
+
+ if (end < MODULES_VADDR || end >= MODULES_END)
return -EINVAL;
data.set_mask = set_mask;
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index bbcd82242059..b6beb0e07b1b 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
if (idx > current_cpu_data.tlbsize) {
kvm_err("%s: Invalid Index: %d\n", __func__, idx);
kvm_mips_dump_host_tlbs();
+ local_irq_restore(flags);
return -1;
}
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index c1388d40663b..bd6437f67dc0 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason),
TP_STRUCT__entry(
- __field(struct kvm_vcpu *, vcpu)
+ __field(unsigned long, pc)
__field(unsigned int, reason)
),
TP_fast_assign(
- __entry->vcpu = vcpu;
+ __entry->pc = vcpu->arch.pc;
__entry->reason = reason;
),
TP_printk("[%s]PC: 0x%08lx",
kvm_mips_exit_types_str[__entry->reason],
- __entry->vcpu->arch.pc)
+ __entry->pc)
);
#endif /* _TRACE_KVM_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 9cfa3706a1b8..f1ea5972f6ec 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl,
int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct device *dev);
extern void iommu_del_device(struct device *dev);
+extern int __init tce_iommu_bus_notifier_init(void);
#else
static inline void iommu_register_group(struct iommu_table *tbl,
int pci_domain_number,
@@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev)
static inline void iommu_del_device(struct device *dev)
{
}
+
+static inline int __init tce_iommu_bus_notifier_init(void)
+{
+ return 0;
+}
#endif /* !CONFIG_IOMMU_API */
static inline void set_iommu_table_base_and_group(struct device *dev,
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
new file mode 100644
index 000000000000..744fd54de374
--- /dev/null
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_POWERPC_IRQ_WORK_H
+#define _ASM_POWERPC_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return true;
+}
+
+#endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5d3968c4d799..b054f33ab1fb 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_del_device);
+static int tce_iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ return iommu_add_device(dev);
+ case BUS_NOTIFY_DEL_DEVICE:
+ if (dev->iommu_group)
+ iommu_del_device(dev);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static struct notifier_block tce_iommu_bus_nb = {
+ .notifier_call = tce_iommu_bus_notifier,
+};
+
+int __init tce_iommu_bus_notifier_init(void)
+{
+ bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+ return 0;
+}
#endif /* CONFIG_IOMMU_API */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6e19afa35a15..ec9ec2058d2d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -541,8 +541,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
if (smp_ops->give_timebase)
smp_ops->give_timebase();
- /* Wait until cpu puts itself in the online map */
- while (!cpu_online(cpu))
+ /* Wait until cpu puts itself in the online & active maps */
+ while (!cpu_online(cpu) || !cpu_active(cpu))
cpu_relax();
return 0;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index e69142f4af08..54323d6b5166 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -836,30 +836,4 @@ void __init pnv_pci_init(void)
#endif
}
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- return iommu_add_device(dev);
- case BUS_NOTIFY_DEL_DEVICE:
- if (dev->iommu_group)
- iommu_del_device(dev);
- return 0;
- default:
- return 0;
- }
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
- .notifier_call = tce_iommu_bus_notifier,
-};
-
-static int __init tce_iommu_bus_notifier_init(void)
-{
- bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
- return 0;
-}
machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1d3d52dc3ff3..7803a19adb31 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str)
}
__setup("multitce=", disable_multitce);
+
+machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d84559e31f32..f407bbf5ee94 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -515,15 +515,15 @@ struct s390_io_adapter {
#define S390_ARCH_FAC_MASK_SIZE_U64 \
(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
-struct s390_model_fac {
- /* facilities used in SIE context */
- __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
- /* subset enabled by kvm */
- __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+struct kvm_s390_fac {
+ /* facility list requested by guest */
+ __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+ /* facility mask supported by kvm & hosting machine */
+ __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
};
struct kvm_s390_cpu_model {
- struct s390_model_fac *fac;
+ struct kvm_s390_fac *fac;
struct cpuid cpu_id;
unsigned short ibc;
};
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b71954654..8fb3802f8fad 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
+ S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
atomic_dec(&prev->context.attach_count);
if (MACHINE_HAS_TLB_LC)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e44166..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
#endif
}
-static inline void clear_page(void *page)
-{
- register unsigned long reg1 asm ("1") = 0;
- register void *reg2 asm ("2") = page;
- register unsigned long reg3 asm ("3") = 4096;
- asm volatile(
- " mvcl 2,0"
- : "+d" (reg2), "+d" (reg3) : "d" (reg1)
- : "memory", "cc");
-}
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
/*
* copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index cb2d51e779df..830066f936c8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
insn->offset = (entry->target - entry->code) >> 1;
}
-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+ struct insn *new)
{
unsigned char *ipc = (unsigned char *)entry->code;
- unsigned char *ipe = (unsigned char *)insn;
+ unsigned char *ipe = (unsigned char *)expected;
+ unsigned char *ipn = (unsigned char *)new;
pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+ pr_emerg("New: %02x %02x %02x %02x %02x %02x\n",
+ ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
panic("Corrupted kernel text");
}
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry,
}
if (init) {
if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
- jump_label_bug(entry, &old);
+ jump_label_bug(entry, &orignop, &new);
} else {
if (memcmp((void *)entry->code, &old, sizeof(old)))
- jump_label_bug(entry, &old);
+ jump_label_bug(entry, &old, &new);
}
probe_kernel_write((void *)entry->code, &new, sizeof(new));
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 36154a2f1814..2ca95862e336 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ jump_label_apply_nops(me);
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
return 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 26108232fcaa..dc488e13b7e3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,7 +18,7 @@
static DEFINE_PER_CPU(struct cpuid, cpu_id);
-void cpu_relax(void)
+void notrace cpu_relax(void)
{
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0c3623927563..f6579cfde2df 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
sizeof(struct cpuid));
kvm->arch.model.ibc = proc->ibc;
- memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+ memcpy(kvm->arch.model.fac->list, proc->fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE);
} else
ret = -EFAULT;
@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
}
memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
proc->ibc = kvm->arch.model.ibc;
- memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+ memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
ret = -EFAULT;
kfree(proc);
@@ -576,10 +576,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
}
get_cpu_id((struct cpuid *) &mach->cpuid);
mach->ibc = sclp_get_ibc();
- memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
- kvm_s390_fac_list_mask_size() * sizeof(u64));
+ memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_U64);
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
ret = -EFAULT;
kfree(mach);
@@ -778,15 +778,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
static int kvm_s390_query_ap_config(u8 *config)
{
u32 fcn_code = 0x04000000UL;
- u32 cc;
+ u32 cc = 0;
+ memset(config, 0, 128);
asm volatile(
"lgr 0,%1\n"
"lgr 2,%2\n"
".long 0xb2af0000\n" /* PQAP(QCI) */
- "ipm %0\n"
+ "0: ipm %0\n"
"srl %0,28\n"
- : "=r" (cc)
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+r" (cc)
: "r" (fcn_code), "r" (config)
: "cc", "0", "2", "memory"
);
@@ -839,9 +842,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
kvm_s390_set_crycb_format(kvm);
- /* Disable AES/DEA protected key functions by default */
- kvm->arch.crypto.aes_kw = 0;
- kvm->arch.crypto.dea_kw = 0;
+ /* Enable AES/DEA protected key functions by default */
+ kvm->arch.crypto.aes_kw = 1;
+ kvm->arch.crypto.dea_kw = 1;
+ get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
return 0;
}
@@ -886,40 +893,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/*
* The architectural maximum amount of facilities is 16 kbit. To store
* this amount, 2 kbyte of memory is required. Thus we need a full
- * page to hold the active copy (arch.model.fac->sie) and the current
- * facilities set (arch.model.fac->kvm). Its address size has to be
+ * page to hold the guest facility list (arch.model.fac->list) and the
+ * facility mask (arch.model.fac->mask). Its address size has to be
* 31 bits and word aligned.
*/
kvm->arch.model.fac =
- (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.model.fac)
goto out_nofac;
- memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_U64);
-
- /*
- * If this KVM host runs *not* in a LPAR, relax the facility bits
- * of the kvm facility mask by all missing facilities. This will allow
- * to determine the right CPU model by means of the remaining facilities.
- * Live guest migration must prohibit the migration of KVMs running in
- * a LPAR to non LPAR hosts.
- */
- if (!MACHINE_IS_LPAR)
- for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
- kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
-
- /*
- * Apply the kvm facility mask to limit the kvm supported/tolerated
- * facility list.
- */
+ /* Populate the facility mask initially. */
+ memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
if (i < kvm_s390_fac_list_mask_size())
- kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+ kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
else
- kvm->arch.model.fac->kvm[i] = 0UL;
+ kvm->arch.model.fac->mask[i] = 0UL;
}
+ /* Populate the facility list initially. */
+ memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+
kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
@@ -1165,8 +1161,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
mutex_lock(&vcpu->kvm->lock);
vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
- memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
mutex_unlock(&vcpu->kvm->lock);
@@ -1212,7 +1206,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
- vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
+ vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 985c2114d7ef..c34109aa552d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
/* test availability of facility in a kvm intance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
- return __test_facility(nr, kvm->arch.model.fac->kvm);
+ return __test_facility(nr, kvm->arch.model.fac->mask) &&
+ __test_facility(nr, kvm->arch.model.fac->list);
}
/* are cpu states controlled by user space */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index bdd9b5b17e03..351116939ea2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
* We need to shift the lower 32 facility bits (bit 0-31) from a u64
* into a u32 memory representation. They will remain bits 0-31.
*/
- fac = *vcpu->kvm->arch.model.fac->sie >> 32;
+ fac = *vcpu->kvm->arch.model.fac->list >> 32;
rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&fac, sizeof(fac));
if (rc)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 753a56731951..f0b85443e060 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
return (void __iomem *) addr + offset;
}
-EXPORT_SYMBOL_GPL(pci_iomap_range);
+EXPORT_SYMBOL(pci_iomap_range);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
}
spin_unlock(&zpci_iomap_lock);
}
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
{
- struct pci_dev *pdev = zdev->pdev;
resource_size_t len;
int i;
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
}
-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
{
- struct pci_dev *pdev = zdev->pdev;
resource_size_t len;
int i;
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)
zdev->pdev = pdev;
pdev->dev.groups = zpci_attr_groups;
- zpci_map_resources(zdev);
+ zpci_map_resources(pdev);
for (i = 0; i < PCI_BAR_COUNT; i++) {
res = &pdev->resource[i];
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
return 0;
}
+void pcibios_release_device(struct pci_dev *pdev)
+{
+ zpci_unmap_resources(pdev);
+}
+
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
struct zpci_dev *zdev = get_zdev(pdev);
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
zdev->pdev = pdev;
zpci_debug_init_device(zdev);
zpci_fmb_enable_device(zdev);
- zpci_map_resources(zdev);
return pci_enable_resources(pdev, mask);
}
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
- zpci_unmap_resources(zdev);
zpci_fmb_disable_device(zdev);
zpci_debug_exit_device(zdev);
zdev->pdev = NULL;
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int zpci_restore(struct device *dev)
{
- struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
int ret = 0;
if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev)
if (ret)
goto out;
- zpci_map_resources(zdev);
+ zpci_map_resources(pdev);
zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
zdev->start_dma + zdev->iommu_size - 1,
(u64) zdev->dma_table);
@@ -709,12 +711,14 @@ out:
static int zpci_freeze(struct device *dev)
{
- struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
if (zdev->state != ZPCI_FN_STATE_ONLINE)
return 0;
zpci_unregister_ioat(zdev, 0);
+ zpci_unmap_resources(pdev);
return clp_disable_fh(zdev);
}
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 8aa271b3d1ad..b1bb2b72302c 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
if (copy_from_user(buf, user_buffer, length))
goto out;
- memcpy_toio(io_addr, buf, length);
- ret = 0;
+ ret = zpci_memcpy_toio(io_addr, buf, length);
out:
if (buf != local_buf)
kfree(buf);
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
goto out;
io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
- ret = -EFAULT;
- if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+ ret = -EFAULT;
goto out;
-
- memcpy_fromio(buf, io_addr, length);
-
- if (copy_to_user(user_buffer, buf, length))
+ }
+ ret = zpci_memcpy_fromio(buf, io_addr, length);
+ if (ret)
goto out;
+ if (copy_to_user(user_buffer, buf, length))
+ ret = -EFAULT;
- ret = 0;
out:
if (buf != local_buf)
kfree(buf);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c2fb8a87dccb..b7d31ca55187 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -499,6 +499,7 @@ config X86_INTEL_QUARK
depends on X86_IO_APIC
select IOSF_MBI
select INTEL_IMR
+ select COMMON_CLK
---help---
Select to include support for Quark X1000 SoC.
Say Y here if you have a Quark based system such as the Arduino
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 5fa9770035dc..c9a6d68b8d62 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
if (boot_cpu_has(X86_FEATURE_XSAVES))
asm volatile("1:"XSAVES"\n\t"
"2:\n\t"
- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ xstate_fault
+ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
else
asm volatile("1:"XSAVE"\n\t"
"2:\n\t"
- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ xstate_fault
+ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
-
- asm volatile(xstate_fault
- : "0" (0)
- : "memory");
-
return err;
}
@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
if (boot_cpu_has(X86_FEATURE_XSAVES))
asm volatile("1:"XRSTORS"\n\t"
"2:\n\t"
- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ xstate_fault
+ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
else
asm volatile("1:"XRSTOR"\n\t"
"2:\n\t"
- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ xstate_fault
+ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
-
- asm volatile(xstate_fault
- : "0" (0)
- : "memory");
-
return err;
}
@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
*/
alternative_input_2(
"1:"XSAVE,
- "1:"XSAVEOPT,
+ XSAVEOPT,
X86_FEATURE_XSAVEOPT,
- "1:"XSAVES,
+ XSAVES,
X86_FEATURE_XSAVES,
[fx] "D" (fx), "a" (lmask), "d" (hmask) :
"memory");
@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
*/
alternative_input(
"1: " XRSTOR,
- "1: " XRSTORS,
+ XRSTORS,
X86_FEATURE_XSAVES,
"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 10074ad9ebf8..1d74d161687c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -269,11 +269,14 @@ ENTRY(ret_from_fork)
testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
jz 1f
- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
- jnz int_ret_from_sys_call
-
- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
- jmp ret_from_sys_call # go to the SYSRET fastpath
+ /*
+ * By the time we get here, we have no idea whether our pt_regs,
+ * ti flags, and ti status came from the 64-bit SYSCALL fast path,
+ * the slow path, or one of the ia32entry paths.
+ * Use int_ret_from_sys_call to return, since it can safely handle
+ * all of the above.
+ */
+ jmp int_ret_from_sys_call
1:
subq $REST_SKIP, %rsp # leave space for volatiles
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e0b794a84c35..106c01557f2b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
}
- ctxt->dst.orig_val = ctxt->dst.val;
+ /* Copy full 64-bit value for CMPXCHG8B. */
+ ctxt->dst.orig_val64 = ctxt->dst.val64;
special_insn:
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e55b5fc344eb..bd4e34de24c7 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
}
apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
- apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
+ apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
apic->highest_isr_cache = -1;
update_divide_count(apic);
atomic_set(&apic->lapic_timer.pending, 0);
@@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
update_divide_count(apic);
start_apic_timer(apic);
apic->irr_pending = true;
- apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
+ apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
1 : count_vectors(apic->regs + APIC_ISR);
apic->highest_isr_cache = -1;
if (kvm_x86_ops->hwapic_irr_update)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d319e0c24758..cc618c882f90 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
return;
}
-static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
-{
- return;
-}
-
static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
return;
@@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
.vm_has_apicv = svm_vm_has_apicv,
.load_eoi_exitmap = svm_load_eoi_exitmap,
- .hwapic_isr_update = svm_hwapic_isr_update,
.sync_pir_to_irr = svm_sync_pir_to_irr,
.set_tss_addr = svm_set_tss_addr,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 14c1a18d206a..f7b20b417a3a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4367,6 +4367,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
return 0;
}
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_SMP
+ if (vcpu->mode == IN_GUEST_MODE) {
+ apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+ POSTED_INTR_VECTOR);
+ return true;
+ }
+#endif
+ return false;
+}
+
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
int vector)
{
@@ -4375,9 +4387,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
if (is_guest_mode(vcpu) &&
vector == vmx->nested.posted_intr_nv) {
/* the PIR and ON have been set by L1. */
- if (vcpu->mode == IN_GUEST_MODE)
- apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
- POSTED_INTR_VECTOR);
+ kvm_vcpu_trigger_posted_interrupt(vcpu);
/*
* If a posted intr is not recognized by hardware,
* we will accomplish it in the next vmentry.
@@ -4409,12 +4419,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
r = pi_test_and_set_on(&vmx->pi_desc);
kvm_make_request(KVM_REQ_EVENT, vcpu);
-#ifdef CONFIG_SMP
- if (!r && (vcpu->mode == IN_GUEST_MODE))
- apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
- POSTED_INTR_VECTOR);
- else
-#endif
+ if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
kvm_vcpu_kick(vcpu);
}
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 6ac273832f28..e4695985f9de 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info,
struct list_head *list)
{
int ret;
- struct resource_entry *entry;
+ struct resource_entry *entry, *tmp;
sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
info->bridge = device;
@@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info,
dev_dbg(&device->dev,
"no IO and memory resources present in _CRS\n");
else
- resource_list_for_each_entry(entry, list)
- entry->res->name = info->name;
+ resource_list_for_each_entry_safe(entry, tmp, list) {
+ if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
+ (entry->res->flags & IORESOURCE_DISABLED))
+ resource_list_destroy_entry(entry);
+ else
+ entry->res->name = info->name;
+ }
}
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)